// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>
#include <asm/xive.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" };

static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
static void pnv_pci_configure_bus(struct pci_bus *bus);

void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;
static bool pci_reset_phbs __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static int __init pci_reset_phbs_setup(char *str)
{
	pci_reset_phbs = true;
	return 0;
}

early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);

static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;
	phb->ioda.pe_array[pe_no].dma_setup_done = false;

	/*
	 * Clear the PE frozen state as it might be put into frozen state
	 * in the last PCI remove path. It's not harmful to do so when the
	 * PE is already in unfrozen state.
	 */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	return &phb->ioda.pe_array[pe_no];
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %x on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	mutex_lock(&phb->ioda.pe_alloc_mutex);
	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %x was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);
	mutex_unlock(&phb->ioda.pe_alloc_mutex);

	pnv_ioda_init_pe(phb, pe_no);
}

struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count)
{
	struct pnv_ioda_pe *ret = NULL;
	int run = 0, pe, i;

	mutex_lock(&phb->ioda.pe_alloc_mutex);

	/* scan backwards for a run of @count cleared bits */
	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (test_bit(pe, phb->ioda.pe_alloc)) {
			run = 0;
			continue;
		}

		run++;
		if (run == count)
			break;
	}
	if (run != count)
		goto out;

	for (i = pe; i < pe + count; i++) {
		set_bit(i, phb->ioda.pe_alloc);
		pnv_ioda_init_pe(phb, i);
	}
	ret = &phb->ioda.pe_array[pe];

out:
	mutex_unlock(&phb->ioda.pe_alloc_mutex);
	return ret;
}

void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);
	memset(pe, 0, sizeof(struct pnv_ioda_pe));

	mutex_lock(&phb->ioda.pe_alloc_mutex);
	clear_bit(pe_num, phb->ioda.pe_alloc);
	mutex_unlock(&phb->ioda.pe_alloc_mutex);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for the reserved and root bus PE, which
	 * are the first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn(" Cannot strip M64 segment for reserved PE#%x\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn(" Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = ALIGN_DOWN(r->start - base, sgsz);
		end = ALIGN(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}

static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}

static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n", __func__);
		return NULL;
	}

	/* Figure out the PE numbers reserved for the M64 segments */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * The current bus might not own the M64 window; it may instead be
	 * contributed entirely by its child buses. In that case we needn't
	 * pick an M64 dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs on the master
	 * PE's list to form a compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const __be32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info(" M64 window not supported on this PHB type\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info(" Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info(" No <ibm,opal-m64-window> on %pOF\n", dn);
		return;
	}

	/*
	 * Find the available M64 BAR range and pick the last one to
	 * cover the whole 64-bit space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version; IODA3 uses
	 * the IODA2 code.
	 */
	phb->init_m64 = pnv_ioda2_init_m64;
}
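
/*
 * Worked example for the M64 segmenting above (illustrative numbers,
 * not taken from real hardware): with a 256GB M64 window and
 * total_pe_num = 256, m64_segsize is 1GB and segment N maps 1:1 to
 * PE#N by default. A 2GB 64-bit prefetchable BAR placed at
 * m64_base + 5GB..m64_base + 7GB then covers segments 5 and 6, so
 * pnv_ioda_reserve_dev_m64_pe() reserves PE numbers 5 and 6 for it.
 */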

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate = 0, state;
	__be16 pcierr = 0;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note that the PE instance might
	 * not be initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn)
{
	int pe_number = phb->ioda.pe_rmap[bdfn];

	if (pe_number == IODA_INVALID_PE)
		return NULL;

	return &phb->ioda.pe_array[pe_number];
}

struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear the PE frozen state. If it's a master PE, we need
	 * to clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
							  slave->pe_number,
							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate the PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, errors originating
	 * from the PE might affect other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
				 struct pnv_ioda_pe *pe,
				 struct pci_dev *parent)
{
	int64_t rc;

	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number,
						OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
}
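
/*
 * Illustrative note on the OPAL bus-compare encodings used below (an
 * interpretation of the switch statements, not taken from the OPAL
 * documentation): a PE's RID is bus << 8 | devfn, and @bcomp selects
 * how many of the top bits of the bus number must match. For example,
 * a PE spanning a range of 8 buses (say 0x20..0x27) uses
 * OpalPciBus5Bits: only the top 5 bits of the bus number are compared,
 * so all 8 buses, i.e. 8 << 8 = 2048 RIDs, map to the PE, matching
 * rid_end = pe->rid + (count << 8).
 */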

int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we just deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = resource_size(&pe->pbus->busn_res);
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/*
	 * Release from all parents' PELT-V. NPUs don't have a PELTV
	 * table.
	 */
	if (phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_unset_peltv(phb, pe, parent);

	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}

int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = resource_size(&pe->pbus->busn_res);
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, errors originating
	 * from the PE might affect other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU_OCAPI)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb, 1);
	if (!pe) {
		pr_warn("%s: Not enough PE# available, disabling device\n",
			pci_name(dev));
		return NULL;
	}

	/*
	 * NOTE: We don't get a reference for the pointer in the PE
	 * data structure; both the device and PE structures should be
	 * destroyed at the same time.
	 *
	 * At some point we want to remove the PDN completely anyways.
	 */
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;
	pe->device_count++;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		return NULL;
	}

	/* Put PE to the list */
	mutex_lock(&phb->ioda.pe_list_mutex);
	list_add_tail(&pe->list, &phb->ioda.pe_list);
	mutex_unlock(&phb->ioda.pe_list_mutex);
	return pe;
}

/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a
 * single PCI bus; the other contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE typically
 * originates from a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In the partial hotplug case, the PE instance might still be
	 * alive. We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (WARN_ON(pe_num != IODA_INVALID_PE)) {
		pe = &phb->ioda.pe_array[pe_num];
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus))
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
	if (!pe)
		pe = pnv_ioda_pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb, 1);

	if (!pe) {
		pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
			&bus->busn_res.start, &bus->busn_res.end,
			pe->pe_number);
	else
		pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
			&bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/* Check if the BDFN for this device is associated with a PE yet */
	pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
	if (!pe) {
		/* VF PEs should be pre-configured in pnv_pci_sriov_enable() */
		if (WARN_ON(pdev->is_virtfn))
			return;

		pnv_pci_configure_bus(pdev->bus);
		pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
		pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff);

		/*
		 * If we can't set up the IODA PE something has gone horribly
		 * wrong and we can't enable DMA for the device.
		 */
		if (WARN_ON(!pe))
			return;
	} else {
		pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number);
	}

	/*
	 * We assume that bridges *probably* don't need to do any DMA so we can
	 * skip allocating a TCE table, etc. unless we get a non-bridge device.
	 */
	if (!pe->dma_setup_done && !pci_is_bridge(pdev)) {
		switch (phb->type) {
		case PNV_PHB_IODA2:
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
			break;
		default:
			pr_warn("%s: No DMA for PHB#%x (type %d)\n",
				__func__, phb->hose->global_number, phb->type);
		}
	}

	if (pdn)
		pdn->pe_number = pe->pe_number;
	pe->device_count++;

	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);

	/* PEs with a DMA weight of zero won't have a group */
	if (pe->table_group.group)
		iommu_add_device(&pe->table_group, &pdev->dev);
}

/*
 * Reconfigure TVE#0 to be usable as 64-bit DMA space.
 *
 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
 * Devices can only access more than that if bit 59 of the PCI address is set
 * by hardware, which indicates TVE#1 should be used instead of TVE#0.
 * Many PCI devices are not capable of addressing that many bits, and as a
 * result are limited to the 4GB of virtual memory made available to 32-bit
 * devices in TVE#0.
 *
 * In order to work around this, reconfigure TVE#0 so that it is suitable for
 * 64-bit devices: map all of system memory at a 4GB offset within the window
 * and leave the first 4GB unmapped. This should only be used by devices that
 * want more than 4GB, and only on PEs that have no 32-bit devices.
 *
 * Currently this will only work on PHB3 (POWER8).
 */
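
/*
 * Worked example for the sizing in pnv_pci_ioda_dma_64bit_bypass()
 * below (illustrative numbers): with memory_hotplug_max() = 64GB, the
 * window must cover 64GB of RAM plus the 4GB offset, rounded up to a
 * power of two: 128GB. With 256MB TCEs (tce_order = 28) that is 512
 * TCE entries of 8 bytes each, so table_size = 4KB, which is then
 * rounded up to PAGE_SIZE if smaller. TCE N (for N >= 16) maps PCI
 * address N * 256MB to host address (N - 16) * 256MB, i.e. RAM starts
 * at PCI address 4GB.
 */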

static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
{
	u64 window_size, table_size, tce_count, addr;
	struct page *table_pages;
	u64 tce_order = 28; /* 256MB TCEs */
	__be64 *tces;
	s64 rc;

	/*
	 * Window size needs to be a power of two, but needs to account for
	 * shifting memory by the 4GB offset required to skip 32bit space.
	 */
	window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
	tce_count = window_size >> tce_order;
	table_size = tce_count << 3;

	if (table_size < PAGE_SIZE)
		table_size = PAGE_SIZE;

	table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
				       get_order(table_size));
	if (!table_pages)
		goto err;

	tces = page_address(table_pages);
	if (!tces)
		goto err;

	memset(tces, 0, table_size);

	for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
		tces[(addr + (1ULL << 32)) >> tce_order] =
			cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
	}

	rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
					pe->pe_number,
					/* reconfigure window 0 */
					(pe->pe_number << 1) + 0,
					1,
					__pa(tces),
					table_size,
					1 << tce_order);
	if (rc == OPAL_SUCCESS) {
		pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
		return 0;
	}
err:
	pe_err(pe, "Error configuring 64-bit DMA bypass\n");
	return -EIO;
}

static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
						u64 dma_mask)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return false;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		if (dma_mask >= top)
			return true;
	}

	/*
	 * If the device can't set the TCE bypass bit but still wants
	 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
	 * bypass the 32-bit region and be usable for 64-bit DMAs.
	 * The device needs to be able to address all of this space.
	 */
	if (dma_mask >> 32 &&
	    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
	    /* pe->pdev should be set if it's a single device, pe->pbus if not */
	    (pe->device_count == 1 || !pe->pbus) &&
	    phb->model == PNV_PHB_MODEL_PHB3) {
		/* Configure the bypass mode */
		s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
		if (rc)
			return false;
		/* 4GB offset bypasses 32-bit space */
		pdev->dev.archdata.dma_offset = (1ULL << 32);
		return true;
	}

	return false;
}

static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb)
{
	return phb->regs + 0x210;
}

#ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */
static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	return pnv_tce_xchg(tbl, index, hpa, direction);
}
#endif

#define PHB3_TCE_KILL_INVAL_ALL		PPC_BIT(0)
#define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE		PPC_BIT(2)

static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	/* 01xb - invalidate TCEs that match the specified PE# */
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
	unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);

	mb(); /* Ensure above stores are visible */
	__raw_writeq_be(val, invalidate);
}

static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe,
					unsigned shift, unsigned long index,
					unsigned long npages)
{
	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
	unsigned long start, end, inc;

	/* We'll invalidate DMA address in PE scope */
	start = PHB3_TCE_KILL_INVAL_ONE;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	start |= (index << shift);
	end |= ((index + npages - 1) << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		__raw_writeq_be(start, invalidate);
		start += inc;
	}
}
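
/*
 * Illustrative sketch of the PHB3 TCE kill register writes above
 * (based on the bit definitions in this file, not on the PHB3 spec
 * itself): writing PHB3_TCE_KILL_INVAL_PE | PE# kills every cached
 * TCE for that PE, while PHB3_TCE_KILL_INVAL_ONE | PE# | (DMA address)
 * kills a single cached translation; the loop above steps the DMA
 * address by one IOMMU page (1 << shift) per write.
 */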

static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
		pnv_pci_phb3_tce_invalidate_pe(pe);
	else
		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
				  pe->pe_number, 0, 0, 0);
}

static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
		unsigned long index, unsigned long npages)
{
	struct iommu_table_group_link *tgl;

	list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
		struct pnv_ioda_pe *pe = container_of(tgl->table_group,
				struct pnv_ioda_pe, table_group);
		struct pnv_phb *phb = pe->phb;
		unsigned int shift = tbl->it_page_shift;

		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
			pnv_pci_phb3_tce_invalidate(pe, shift,
						    index, npages);
		else
			opal_pci_tce_kill(phb->opal_id,
					  OPAL_PCI_TCE_KILL_PAGES,
					  pe->pe_number, 1u << shift,
					  index << shift, npages);
	}
}

static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
		long npages, unsigned long uaddr,
		enum dma_data_direction direction,
		unsigned long attrs)
{
	int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
			attrs);

	if (!ret)
		pnv_pci_ioda2_tce_invalidate(tbl, index, npages);

	return ret;
}

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
		long npages)
{
	pnv_tce_free(tbl, index, npages);

	pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
	.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = pnv_ioda_tce_xchg_no_kill,
	.tce_kill = pnv_pci_ioda2_tce_invalidate,
	.useraddrptr = pnv_tce_useraddrptr,
#endif
	.clear = pnv_ioda2_tce_free,
	.get = pnv_tce_get,
	.free = pnv_pci_ioda2_table_free_pages,
};

static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
		num, start_addr, start_addr + win_size - 1,
		IOMMU_PAGE_SIZE(tbl));

	/*
	 * Map the TCE table through the TVT. The TVE index is the PE number
	 * shifted by 1 bit for the 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
					pe->pe_number,
					(pe->pe_number << 1) + num,
					tbl->it_indirect_levels + 1,
					__pa(tbl->it_base),
					size << 3,
					IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}

	pnv_pci_link_table_and_group(phb->hose->node, num,
				     tbl, &pe->table_group);
	pnv_pci_ioda2_tce_invalidate_pe(pe);

	return 0;
}

static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}

static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		bool alloc_userspace_copy, struct iommu_table **ptbl)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	int nid = pe->phb->hose->node;
	__u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
	long ret;
	struct iommu_table *tbl;

	tbl = pnv_pci_table_alloc(nid);
	if (!tbl)
		return -ENOMEM;

	tbl->it_ops = &pnv_ioda2_iommu_ops;

	ret = pnv_pci_ioda2_table_alloc_pages(nid,
			bus_offset, page_shift, window_size,
			levels, alloc_userspace_copy, tbl);
	if (ret) {
		iommu_tce_table_put(tbl);
		return ret;
	}

	*ptbl = tbl;

	return 0;
}

static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = NULL;
	long rc;
	unsigned long res_start, res_end;

	/*
	 * crashkernel= specifies the kdump kernel's maximum memory at
	 * some offset and there is no guarantee the result is a power
	 * of 2, which will cause errors later.
	 */
	const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());

	/*
	 * In memory constrained environments, e.g. kdump kernel, the
	 * DMA window can be larger than available memory, which will
	 * cause errors later.
	 */
	const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER);

	/*
	 * We create the default window as big as we can. The constraint is
	 * the max order of allocation possible. The TCE table is likely to
	 * end up being multilevel and with on-demand allocation in place,
	 * the initial use is not going to be huge as the default window aims
	 * to support crippled devices (i.e. not fully 64bit DMAble) only.
	 */
	/* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */
	const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory);
	/* Each TCE level cannot exceed maxblock so go multilevel if needed */
	unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT);
	unsigned long tcelevel_order = ilog2(maxblock >> 3);
	unsigned int levels = tces_order / tcelevel_order;

	if (tces_order % tcelevel_order)
		levels += 1;
	/*
	 * We try to stick to default levels (which is >1 at the moment) in
	 * order to save memory by relying on on-demand TCE level allocation.
	 */
	levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS);

	rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT,
			window_size, levels, false, &tbl);
	if (rc) {
		pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
		       rc);
		return rc;
	}

	/* We use top part of 32bit space for MMIO so exclude it from DMA */
	res_start = 0;
	res_end = 0;
	if (window_size > pe->phb->ioda.m32_pci_base) {
		res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
		res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
	}

	tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number;
	if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
		rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
	else
		rc = -ENOMEM;
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc);
		iommu_tce_table_put(tbl);
		tbl = NULL; /* This clears iommu_table_base below */
	}
	if (!pnv_iommu_bypass_disabled)
		pnv_pci_ioda2_set_bypass(pe, true);

	/*
	 * Set the table base for the case of IOMMU DMA use. Usually this is
	 * done from dma_dev_setup() which is not called when a device is
	 * returned from VFIO so do it here.
	 */
	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);

	return 0;
}
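
/*
 * Worked example for the default window sizing above (illustrative,
 * assuming 64K pages, i.e. PAGE_SHIFT = 16, and MAX_ORDER = 10):
 * maxblock = 1 << 26 = 64MB, so the it_map bitmap constraint (1 bit
 * per IOMMU page) allows (maxblock * 8) pages, i.e. a 32TB window.
 * With, say, 1TB of RAM the window is capped at 1TB (max_memory);
 * then tces_order = ilog2(1TB / 64K) = 24 and tcelevel_order =
 * ilog2(64MB / 8) = 23, so two TCE levels are needed.
 */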

static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
		int num)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = pe->phb;
	long ret;

	pe_info(pe, "Removing DMA window #%d\n", num);

	ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					 (pe->pe_number << 1) + num,
					 0/* levels */, 0/* table address */,
					 0/* table size */, 0/* page size */);
	if (ret)
		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
	else
		pnv_pci_ioda2_tce_invalidate_pe(pe);

	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);

	return ret;
}

#ifdef CONFIG_IOMMU_API
unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
		__u64 window_size, __u32 levels)
{
	unsigned long bytes = 0;
	const unsigned window_shift = ilog2(window_size);
	unsigned entries_shift = window_shift - page_shift;
	unsigned table_shift = entries_shift + 3;
	unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
	unsigned long direct_table_size;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
	    !is_power_of_2(window_size))
		return 0;

	/* Calculate a direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	table_shift = entries_shift + 3;
	table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
	direct_table_size = 1UL << table_shift;

	for ( ; levels; --levels) {
		bytes += ALIGN(tce_table_size, direct_table_size);

		tce_table_size /= direct_table_size;
		tce_table_size <<= 3;
		tce_table_size = max_t(unsigned long,
				       tce_table_size, direct_table_size);
	}

	return bytes + bytes; /* one for HW table, one for userspace copy */
}
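
/*
 * Worked example for pnv_pci_ioda2_get_table_size() above
 * (illustrative, assuming 64K kernel pages): for a 4GB window with
 * 64K IOMMU pages and levels = 2 there are 2^16 TCEs, so a flat table
 * would be 512K. Each level then needs 2^8 entries, but is rounded up
 * to PAGE_SIZE, so direct_table_size = 64K: the loop accounts 512K
 * for the bottom level plus 64K for the top level, and the 576K total
 * is doubled to 1152K to cover the userspace copy.
 */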

static long pnv_pci_ioda2_create_table_userspace(
		struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	long ret = pnv_pci_ioda2_create_table(table_group,
			num, page_shift, window_size, levels, true, ptbl);

	if (!ret)
		(*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
				page_shift, window_size, levels);
	return ret;
}

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
		dev->dev.archdata.dma_offset = pe->tce_bypass_base;

		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);
	/* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
	struct iommu_table *tbl = pe->table_group.tables[0];

	/*
	 * iommu_ops transfers ownership per device, but we model the
	 * group ownership on the first device in the group.
	 */
	if (!tbl)
		return 0;

	pnv_pci_ioda2_set_bypass(pe, false);
	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (pe->pbus)
		pnv_ioda_setup_bus_dma(pe, pe->pbus);
	else if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, NULL);
	iommu_tce_table_put(tbl);

	return 0;
}

static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
						table_group);

	/* See the comment about iommu_ops above */
	if (pe->table_group.tables[0])
		return;
	pnv_pci_ioda2_setup_default_config(pe);
	if (pe->pbus)
		pnv_ioda_setup_bus_dma(pe, pe->pbus);
}

static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_pci_ioda2_create_table_userspace,
	.set_window = pnv_pci_ioda2_set_window,
	.unset_window = pnv_pci_ioda2_unset_window,
	.take_ownership = pnv_ioda2_take_ownership,
	.release_ownership = pnv_ioda2_release_ownership,
};
#endif

void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				struct pnv_ioda_pe *pe)
{
	int64_t rc;

	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	/* The PE will reserve all possible 32-bits space */
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		phb->ioda.m32_pci_base);

	/* Setup linux iommu table */
	pe->table_group.tce32_start = 0;
	pe->table_group.tce32_size = phb->ioda.m32_pci_base;
	pe->table_group.max_dynamic_windows_supported =
			IOMMU_TABLE_GROUP_MAX_TABLES;
	pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
	pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);

	rc = pnv_pci_ioda2_setup_default_config(pe);
	if (rc)
		return;

#ifdef CONFIG_IOMMU_API
	pe->table_group.ops = &pnv_pci_ioda2_ops;
	iommu_register_group(&pe->table_group, phb->hose->global_number,
			     pe->pe_number);
#endif
	pe->dma_setup_done = true;
}

/*
 * Called from KVM in real mode to EOI passthru interrupts. The ICP
 * EOI is handled directly in KVM in kvmppc_deliver_irq_passthru().
 *
 * The IRQ data is mapped in the PCI-MSI domain and the EOI OPAL call
 * needs an HW IRQ number mapped in the XICS IRQ domain. The HW IRQ
 * numbers of the in-the-middle MSI domain are vector numbers and it's
 * good enough for OPAL. Use that.
 */
int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
{
	struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data);
	struct pnv_phb *phb = hose->private_data;

	return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
}

/*
 * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers
 */
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	int64_t rc;
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct pci_controller *hose = irq_data_get_irq_chip_data(d);
	struct pnv_phb *phb = hose->private_data;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

/* P8/CXL only */
void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
	struct irq_data *idata;
	struct irq_chip *ichip;

	/* The MSI EOI OPAL call is only needed on PHB3 */
	if (phb->model != PNV_PHB_MODEL_PHB3)
		return;

	if (!phb->ioda.irq_chip_init) {
		/*
		 * First time we setup an MSI IRQ, we need to setup the
		 * corresponding IRQ chip to route correctly.
		 */
		idata = irq_get_irq_data(virq);
		ichip = irq_data_get_irq_chip(idata);
		phb->ioda.irq_chip_init = 1;
		phb->ioda.irq_chip = *ichip;
		phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
	}
	irq_set_chip(virq, &phb->ioda.irq_chip);
	irq_set_chip_data(virq, phb->hose);
}

static struct irq_chip pnv_pci_msi_irq_chip;

/*
 * Returns true iff chip is something that we could call
 * pnv_opal_pci_msi_eoi() for.
 */
bool is_pnv_opal_msi(struct irq_chip *chip)
{
	return chip == &pnv_pci_msi_irq_chip;
}
EXPORT_SYMBOL_GPL(is_pnv_opal_msi);

static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				    unsigned int xive_num,
				    unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	__be32 data;
	int rc;

	dev_dbg(&dev->dev, "%s: setup %s-bit MSI for vector #%d\n", __func__,
		is_64 ? "64" : "32", xive_num);

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (dev->no_64bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	return 0;
}

/*
 * The msi_free() op is called before irq_domain_free_irqs_top() when
 * the handler data is still available. Use that to clear the XIVE
 * controller.
 */
static void pnv_msi_ops_msi_free(struct irq_domain *domain,
				 struct msi_domain_info *info,
				 unsigned int irq)
{
	if (xive_enabled())
		xive_irq_free_data(irq);
}

static struct msi_domain_ops pnv_pci_msi_domain_ops = {
	.msi_free	= pnv_msi_ops_msi_free,
};

static void pnv_msi_shutdown(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_shutdown)
		d->chip->irq_shutdown(d);
}

static void pnv_msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void pnv_msi_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip pnv_pci_msi_irq_chip = {
	.name		= "PNV-PCI-MSI",
	.irq_shutdown	= pnv_msi_shutdown,
	.irq_mask	= pnv_msi_mask,
	.irq_unmask	= pnv_msi_unmask,
	.irq_eoi	= irq_chip_eoi_parent,
};

static struct msi_domain_info pnv_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.ops   = &pnv_pci_msi_domain_ops,
	.chip  = &pnv_pci_msi_irq_chip,
};

static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_data_get_msi_desc(d);
	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
	struct pci_controller *hose = irq_data_get_irq_chip_data(d);
	struct pnv_phb *phb = hose->private_data;
	int rc;

	rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq,
				      entry->pci.msi_attrib.is_64, msg);
	if (rc)
		dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n",
			entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc);
}

/*
 * The IRQ data is mapped in the MSI domain in which HW IRQ numbers
 * correspond to vector numbers.
 */
static void pnv_msi_eoi(struct irq_data *d)
{
	struct pci_controller *hose = irq_data_get_irq_chip_data(d);
	struct pnv_phb *phb = hose->private_data;

	if (phb->model == PNV_PHB_MODEL_PHB3) {
		/*
		 * The EOI OPAL call takes an OPAL HW IRQ number but
		 * since it is translated into a vector number in
		 * OPAL, use that directly.
		 */
		WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq));
	}

	irq_chip_eoi_parent(d);
}

static struct irq_chip pnv_msi_irq_chip = {
	.name			= "PNV-MSI",
	.irq_shutdown		= pnv_msi_shutdown,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= pnv_msi_eoi,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= pnv_msi_compose_msg,
};

static int pnv_irq_parent_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, int hwirq)
{
	struct irq_fwspec parent_fwspec;
	int ret;

	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 2;
	parent_fwspec.param[0] = hwirq;
	parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
	if (ret)
		return ret;

	return 0;
}

static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct pci_controller *hose = domain->host_data;
	struct pnv_phb *phb = hose->private_data;
	msi_alloc_info_t *info = arg;
	struct pci_dev *pdev = msi_desc_to_pci_dev(info->desc);
	int hwirq;
	int i, ret;

	hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, nr_irqs);
	if (hwirq < 0) {
		dev_warn(&pdev->dev, "failed to find a free MSI\n");
		return -ENOSPC;
	}

	dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__,
		hose->dn, virq, hwirq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		ret = pnv_irq_parent_domain_alloc(domain, virq + i,
						  phb->msi_base + hwirq + i);
		if (ret)
			goto out;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &pnv_msi_irq_chip, hose);
	}

	return 0;

out:
	/* Free the @i parent IRQs that were successfully allocated */
	irq_domain_free_irqs_parent(domain, virq, i);
	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs);
	return ret;
}

static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pci_controller *hose = irq_data_get_irq_chip_data(d);
	struct pnv_phb *phb = hose->private_data;

	pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn,
		 virq, d->hwirq, nr_irqs);

	msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs);
	/* XIVE domain is cleared through ->msi_free() */
}

static const struct irq_domain_ops pnv_irq_domain_ops = {
	.alloc  = pnv_irq_domain_alloc,
	.free   = pnv_irq_domain_free,
};
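
/*
 * Sketch of the resulting IRQ domain stack (summarizing the comments
 * above, not an authoritative diagram):
 *
 *   PCI-MSI domain (pnv_pci_msi_irq_chip, per-device MSI descriptors)
 *     -> PNV-MSI domain (pnv_msi_irq_chip, HW IRQ = MSI vector number)
 *       -> XICS/XIVE parent domain (HW IRQ = phb->msi_base + vector)
 */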
static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count)
{
	struct pnv_phb *phb = hose->private_data;
	struct irq_domain *parent = irq_get_default_host();

	hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id);
	if (!hose->fwnode)
		return -ENOMEM;

	hose->dev_domain = irq_domain_create_hierarchy(parent, 0, count,
						       hose->fwnode,
						       &pnv_irq_domain_ops, hose);
	if (!hose->dev_domain) {
		pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n",
		       hose->dn, hose->global_number);
		irq_domain_free_fwnode(hose->fwnode);
		return -ENOMEM;
	}

	hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn),
						     &pnv_msi_domain_info,
						     hose->dev_domain);
	if (!hose->msi_domain) {
		pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n",
		       hose->dn, hose->global_number);
		irq_domain_free_fwnode(hose->fwnode);
		irq_domain_remove(hose->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

static void __init pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);

	pnv_msi_allocate_domains(phb->hose, count);
}

static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
				  struct resource *res)
{
	struct pnv_phb *phb = pe->phb;
	struct pci_bus_region region;
	int index;
	int64_t rc;

	if (!res || !res->flags || res->start > res->end ||
	    res->flags & IORESOURCE_UNSET)
		return;

	if (res->flags & IORESOURCE_IO) {
		region.start = res->start - phb->ioda.io_pci_base;
		region.end   = res->end - phb->ioda.io_pci_base;
		index = region.start / phb->ioda.io_segsize;

		while (index < phb->ioda.total_pe_num &&
		       region.start <= region.end) {
			phb->ioda.io_segmap[index] = pe->pe_number;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.io_segsize;
			index++;
		}
	} else if ((res->flags & IORESOURCE_MEM) &&
		   !pnv_pci_is_m64(phb, res)) {
		region.start = res->start -
			       phb->hose->mem_offset[0] -
			       phb->ioda.m32_pci_base;
		region.end   = res->end -
			       phb->hose->mem_offset[0] -
			       phb->ioda.m32_pci_base;
		index = region.start / phb->ioda.m32_segsize;

		while (index < phb->ioda.total_pe_num &&
		       region.start <= region.end) {
			phb->ioda.m32_segmap[index] = pe->pe_number;
			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
			if (rc != OPAL_SUCCESS) {
				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x\n",
				       __func__, rc, index, pe->pe_number);
				break;
			}

			region.start += phb->ioda.m32_segsize;
			index++;
		}
	}
}
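/*
 * Worked example for the segment arithmetic above, with illustrative
 * numbers: given a 2GB M32 window and 256 PEs, m32_segsize is 8MB. A 16MB
 * non-prefetchable BAR starting 24MB past m32_pci_base then spans
 * segments 3 and 4, so both m32_segmap[] entries are pointed at the
 * owning PE and each segment is mapped through OPAL.
 */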
/*
 * This function is called per PE, from the top of the PE hierarchy down
 * to the bottom. An I/O or MMIO segment assigned to a parent PE can
 * therefore be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pci_dev *pdev;
	int i;

	/*
	 * NOTE: We only care about PCI-bus based PEs for now. PCI-device
	 * based PEs, for example SR-IOV VFs, will be handled later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);

		/*
		 * If the PE contains all subordinate PCI buses, the
		 * windows of the child bridges should be mapped to
		 * the PE as well.
		 */
		if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
			continue;
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
			pnv_ioda_setup_pe_res(pe,
				&pdev->resource[PCI_BRIDGE_RESOURCES + i]);
	}
}

#ifdef CONFIG_DEBUG_FS
static int pnv_pci_diag_data_set(void *data, u64 val)
{
	struct pnv_phb *phb = data;
	s64 ret;

	/* Retrieve the diag data from firmware */
	ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					  phb->diag_data_size);
	if (ret != OPAL_SUCCESS)
		return -EIO;

	/* Print the diag data to the kernel log */
	pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
			 "%llu\n");

static int pnv_pci_ioda_pe_dump(void *data, u64 val)
{
	struct pnv_phb *phb = data;
	int pe_num;

	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num];

		if (!test_bit(pe_num, phb->ioda.pe_alloc))
			continue;

		pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n",
			pe->rid, pe->device_count,
			(pe->flags & PNV_IODA_PE_DEV) ? "dev " : "",
			(pe->flags & PNV_IODA_PE_BUS) ? "bus " : "",
			(pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "",
			(pe->flags & PNV_IODA_PE_MASTER) ? "master " : "",
			(pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "",
			(pe->flags & PNV_IODA_PE_VF) ? "vf " : "");
	}

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL,
			 pnv_pci_ioda_pe_dump, "%llu\n");

#endif /* CONFIG_DEBUG_FS */

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, arch_debugfs_dir);

		debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
					   phb, &pnv_pci_diag_data_fops);
		debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs,
					   phb, &pnv_pci_ioda_pe_dump_fops);
	}
#endif /* CONFIG_DEBUG_FS */
}
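/*
 * Illustrative usage from userspace, assuming debugfs is mounted in the
 * usual place and the PHB is PCI domain 1:
 *
 *	echo 1 > /sys/kernel/debug/powerpc/PCI0001/dump_diag_regs
 *	echo 1 > /sys/kernel/debug/powerpc/PCI0001/dump_ioda_pe_state
 *
 * Both files are write-only triggers; their output goes to the kernel log.
 */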
static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct pci_bus *child;

	/* Empty bus ? bail */
	if (list_empty(&bus->devices))
		return;

	/*
	 * If there's a bridge associated with that bus enable it. This works
	 * around races in the generic code if the enabling is done during
	 * parallel probing. This can be removed once those races have been
	 * fixed.
	 */
	if (dev) {
		int rc = pci_enable_device(dev);

		if (rc)
			pci_err(dev, "Error enabling bridge (%d)\n", rc);
		pci_set_master(dev);
	}

	/* Do the same for the child buses */
	list_for_each_entry(child, &bus->children, node)
		pnv_pci_enable_bridge(child);
}

static void pnv_pci_enable_bridges(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		pnv_pci_enable_bridge(hose->bus);
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_create_dbgfs();

	pnv_pci_enable_bridges();

#ifdef CONFIG_EEH
	pnv_eeh_post_init();
#endif
}

/*
 * Returns the alignment for I/O or memory windows of P2P bridges.
 * That actually depends on how PEs are segmented. For now, we return
 * the I/O or M32 segment size for PE-sensitive P2P bridges. Otherwise,
 * the default values (4KiB for I/O, 1MiB for memory) are returned.
 *
 * The current PCI bus might already be placed in a PE that was created
 * for its parent PCI bridge. In that case we needn't enlarge the
 * alignment, which saves resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	int num_pci_bridges = 0;
	struct pci_dev *bridge;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/*
	 * We fall back to M32 if M64 isn't supported. We enforce the M64
	 * alignment for any 64-bit resource, PCIe doesn't care and
	 * bridges only do 64-bit prefetchable anyway.
	 */
	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
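/*
 * Example of the effect, with an illustrative size: on a PHB with a 256MB
 * M64 segment size, a 64-bit prefetchable bridge window is aligned up to
 * 256MB so that it occupies whole M64 segments, and the devices behind
 * the bridge can then be isolated in a PE of their own.
 */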
/*
 * Update the root port, or the upstream port of the bridge behind the
 * root port, with the PHB's windows, in order to accommodate changes in
 * the resources required during a PCI (slot) hotplug. The slot is
 * connected to either the root port or to a downstream port of the PCIe
 * switch behind the root port.
 */
static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
					   unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dev *bridge = bus->self;
	struct resource *r, *w;
	bool msi_region = false;
	int i;

	/* Check if we need to apply the fixup to the bridge's windows */
	if (!pci_is_root_bus(bridge->bus) &&
	    !pci_is_root_bus(bridge->bus->self->bus))
		return;

	/* Fixup the resources */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
		if (!r->flags || !r->parent)
			continue;

		w = NULL;
		if (r->flags & type & IORESOURCE_IO)
			w = &hose->io_resource;
		else if (pnv_pci_is_m64(phb, r) &&
			 (type & IORESOURCE_PREFETCH) &&
			 phb->ioda.m64_segsize)
			w = &hose->mem_resources[1];
		else if (r->flags & type & IORESOURCE_MEM) {
			w = &hose->mem_resources[0];
			msi_region = true;
		}

		r->start = w->start;
		r->end = w->end;

		/*
		 * The 64KB 32-bit MSI region shouldn't be included in
		 * the 32-bit bridge window. Otherwise, we can see strange
		 * issues. One of them is EEH errors observed on Garrison.
		 *
		 * Exclude the top 1MB region, which is the minimal
		 * alignment of the 32-bit bridge window.
		 */
		if (msi_region) {
			r->end += 0x10000;
			r->end -= 0x100000;
		}
	}
}
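/*
 * Worked example of the adjustment above, with made-up addresses: if
 * mem_resources[0] ends at 0xfffeffff (firmware has already carved the
 * 64KB MSI region off the top), then r->end becomes
 * 0xfffeffff + 0x10000 - 0x100000 = 0xffefffff, i.e. the whole top 1MB,
 * including the MSI region, stays out of the 32-bit bridge window.
 */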
static void pnv_pci_configure_bus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct pnv_ioda_pe *pe;
	bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);

	dev_info(&bus->dev, "Configuring PE for bus\n");

	/* Don't assign a PE to a PCI bus that has no subordinate devices */
	if (WARN_ON(list_empty(&bus->devices)))
		return;

	/* Reserve PEs according to used M64 resources */
	pnv_ioda_reserve_m64_pe(bus, NULL, all);

	/*
	 * Assign a PE. We might get here because of a partial hotplug,
	 * in which case we just pick up the existing PE and must not
	 * allocate resources again.
	 */
	pe = pnv_ioda_setup_bus_PE(bus, all);
	if (!pe)
		return;

	pnv_ioda_setup_pe_seg(pe);
}

static resource_size_t pnv_pci_default_alignment(void)
{
	return PAGE_SIZE;
}

/*
 * Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_dn *pdn;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE) {
		pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n");
		return false;
	}

	return true;
}

static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
{
	struct pci_dn *pdn;
	struct pnv_ioda_pe *pe;

	pdn = pci_get_pdn(dev);
	if (!pdn)
		return false;

	if (pdn->pe_number == IODA_INVALID_PE) {
		pe = pnv_ioda_setup_dev_PE(dev);
		if (!pe)
			return false;
	}
	return true;
}

void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
{
	struct iommu_table *tbl = pe->table_group.tables[0];
	int64_t rc;

	if (!pe->dma_setup_done)
		return;

	rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
	if (rc)
		pe_warn(pe, "OPAL error %lld releasing DMA window\n", rc);

	pnv_pci_ioda2_set_bypass(pe, false);
	if (pe->table_group.group) {
		iommu_group_put(pe->table_group.group);
		WARN_ON(pe->table_group.group);
	}

	iommu_tce_table_put(tbl);
}

static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
				 unsigned short win,
				 unsigned int *map)
{
	struct pnv_phb *phb = pe->phb;
	int idx;
	int64_t rc;

	for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
		if (map[idx] != pe->pe_number)
			continue;

		rc = opal_pci_map_pe_mmio_window(phb->opal_id,
				phb->ioda.reserved_pe_idx, win, 0, idx);

		if (rc != OPAL_SUCCESS)
			pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
				rc, win, idx);

		map[idx] = IODA_INVALID_PE;
	}
}

static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA2) {
		pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
				     phb->ioda.m32_segmap);
	}
}
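/*
 * Summary of the teardown order used below: the PE is first unlinked from
 * the PHB's PE list, then its DMA windows are torn down, then its MMIO
 * segments are handed back to the reserved PE, and only then is the PE
 * deconfigured and (unless it is the root-bus PE) freed.
 */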
static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	struct pnv_ioda_pe *slave, *tmp;

	pe_info(pe, "Releasing PE\n");

	mutex_lock(&phb->ioda.pe_list_mutex);
	list_del(&pe->list);
	mutex_unlock(&phb->ioda.pe_list_mutex);

	switch (phb->type) {
	case PNV_PHB_IODA2:
		pnv_pci_ioda2_release_pe_dma(pe);
		break;
	case PNV_PHB_NPU_OCAPI:
		break;
	default:
		WARN_ON(1);
	}

	pnv_ioda_release_pe_seg(pe);
	pnv_ioda_deconfigure_pe(pe->phb, pe);

	/* Release slave PEs in the compound PE */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
			list_del(&slave->list);
			pnv_ioda_free_pe(slave);
		}
	}

	/*
	 * The PE for the root bus might be removed because of hotplug
	 * during EEH recovery from a fenced PHB error. We need to mark
	 * the PE dead so that it can be populated again on the PCI hot
	 * add path. The PE itself must not be destroyed as it's a global
	 * reserved resource.
	 */
	if (phb->ioda.root_pe_idx == pe->pe_number)
		return;

	pnv_ioda_free_pe(pe);
}

static void pnv_pci_release_device(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/* The VF PE state is torn down when sriov_disable() is called */
	if (pdev->is_virtfn)
		return;

	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

#ifdef CONFIG_PCI_IOV
	/*
	 * FIXME: Try to move this to sriov_disable(). It's here since we
	 * allocate the iov state at probe time, because we need to fiddle
	 * with the IOV resources.
	 */
	if (pdev->is_physfn)
		kfree(pdev->dev.archdata.iov_data);
#endif

	/*
	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
	 * isn't removed and re-added in that scenario, so reset the PE
	 * number in @pdn to an invalid one. Otherwise the PE's device
	 * count is decremented when devices are removed but never
	 * incremented when they are added back, which unbalances the
	 * count and eventually breaks the normal PCI hotplug path.
	 */
	pe = &phb->ioda.pe_array[pdn->pe_number];
	pdn->pe_number = IODA_INVALID_PE;

	WARN_ON(--pe->device_count < 0);
	if (pe->device_count == 0)
		pnv_ioda_release_pe(pe);
}

static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
		       OPAL_ASSERT_RESET);
}

static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

#ifdef CONFIG_IOMMU_API
static struct iommu_group *pnv_pci_device_group(struct pci_controller *hose,
						struct pci_dev *pdev)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	if (WARN_ON(!phb))
		return ERR_PTR(-ENODEV);

	pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
	if (!pe)
		return ERR_PTR(-ENODEV);

	if (!pe->table_group.group)
		return ERR_PTR(-ENODEV);

	return iommu_group_ref_get(pe->table_group.group);
}
#endif

static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
	.dma_dev_setup		= pnv_pci_ioda_dma_dev_setup,
	.dma_bus_setup		= pnv_pci_ioda_dma_bus_setup,
	.iommu_bypass_supported	= pnv_pci_ioda_iommu_bypass_supported,
	.enable_device_hook	= pnv_pci_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.setup_bridge		= pnv_pci_fixup_bridge_resources,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
#ifdef CONFIG_IOMMU_API
	.device_group		= pnv_pci_device_group,
#endif
};

static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
	.enable_device_hook	= pnv_ocapi_enable_device_hook,
	.release_device		= pnv_pci_release_device,
	.window_alignment	= pnv_pci_window_alignment,
	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
	.shutdown		= pnv_pci_ioda_shutdown,
};
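/*
 * Illustrative (made-up) device-tree fragment for the properties parsed
 * below; the exact node name and values are firmware-dependent:
 *
 *	pciex@3fffe40000000 {
 *		compatible = "ibm,ioda2-phb";
 *		ibm,opal-phbid = <0x0 0x1>;
 *		bus-range = <0x0 0xff>;
 *		ibm,opal-num-pes = <0x100>;
 *		ibm,opal-reserved-pe = <0xff>;
 *	};
 */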
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m64map_off, m32map_off, pemap_off;
	struct pnv_ioda_pe *root_pe;
	struct resource r;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	unsigned int segno;
	u64 phb_id;
	void *aux;
	long rc;

	if (!of_device_is_available(np))
		return;

	pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = kzalloc(sizeof(*phb), GFP_KERNEL);
	if (!phb)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*phb));

	/* Allocate PCI controller */
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %pOF\n",
		       np);
		/* @phb was allocated with kzalloc(), not memblock */
		kfree(phb);
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %pOF\n", np);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;
	mutex_init(&phb->ioda.pe_alloc_mutex);

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Initialize diagnostic data buffer */
	prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
	if (prop32)
		phb->diag_data_size = be32_to_cpup(prop32);
	else
		phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;

	phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL);
	if (!phb->diag_data)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      phb->diag_data_size);

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	if (!of_address_to_resource(np, 0, &r)) {
		phb->regs_phys = r.start;
		phb->regs = ioremap(r.start, resource_size(&r));
		if (phb->regs == NULL)
			pr_err("  Failed to map registers !\n");
	}

	/* Initialize more IODA stuff */
	phb->ioda.total_pe_num = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe_num = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);

	/* Invalidate RID to PE# mapping */
	for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
		phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);
	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/*
	 * Firmware has already carved the top 64K (the MSI space) off the
	 * M32 window; add it back so segment sizing covers the full window.
	 */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
		     sizeof(unsigned long));
	m64map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
	m32map_off = size;
	size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
	pemap_off = size;
	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
	aux = kzalloc(size, GFP_KERNEL);
	if (!aux)
		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

	phb->ioda.pe_alloc = aux;
	phb->ioda.m64_segmap = aux + m64map_off;
	phb->ioda.m32_segmap = aux + m32map_off;
	for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
		phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
		phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
	}
	phb->ioda.pe_array = aux + pemap_off;
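	/*
	 * Layout of the single 'aux' allocation set up above:
	 *
	 *   aux + 0          : pe_alloc bitmap (one bit per PE)
	 *   aux + m64map_off : m64_segmap[total_pe_num]
	 *   aux + m32map_off : m32_segmap[total_pe_num]
	 *   aux + pemap_off  : pe_array[total_pe_num]
	 */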
	/*
	 * Choose a PE number for the root bus, which shouldn't have
	 * M64 resources consumed by its child devices. Pick the PE
	 * number adjacent to the reserved one if possible.
	 */
	pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
	if (phb->ioda.reserved_pe_idx == 0) {
		phb->ioda.root_pe_idx = 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
		phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
		pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
	} else {
		/* otherwise just allocate one */
		root_pe = pnv_ioda_alloc_pe(phb, 1);
		phb->ioda.root_pe_idx = root_pe->pe_number;
	}

	INIT_LIST_HEAD(&phb->ioda.pe_list);
	mutex_init(&phb->ioda.pe_list_mutex);

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("                  IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);


	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. The PCI core
	 * is expected to apply the correct I/O and MMIO alignment
	 * to the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;

	switch (phb->type) {
	case PNV_PHB_NPU_OCAPI:
		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
		break;
	default:
		hose->controller_ops = pnv_pci_ioda_controller_ops;
	}

	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;

#ifdef CONFIG_PCI_IOV
	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov;
	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
	ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable;
	ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable;
#endif

	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
	if (rc)
		pr_warn("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already got the IODA tables
	 * cleaned out above, so we also have to issue a PHB reset to stop
	 * all PCI transactions from the previous kernel. The
	 * ppc_pci_reset_phbs kernel parameter forces this reset too.
	 * Additionally, if the IODA reset above failed then use a bigger
	 * hammer. This can happen if we get a PHB fatal error in very
	 * early boot.
	 */
	if (is_kdump_kernel() || pci_reset_phbs || rc) {
		pr_info("  Issue PHB reset ...\n");
		pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
	}

	/* Remove the M64 resource if we can't configure it successfully */
	if (!phb->init_m64 || phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;

	/* create pci_dn's for DT nodes under this PHB */
	pci_devs_phb_init_dynamic(hose);
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
}

static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);

	if (!machine_is(powernv))
		return;

	if (phb->type == PNV_PHB_NPU_OCAPI)
		dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);