// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

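/* Flush the per-CPU batch to the hypervisor: install the accumulated
 * page list with pci_sun4v_iommu_map(), or with pci_sun4v_iotsb_map()
 * when the ATU is in use for this DMA mask.
 */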
/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				unsigned long iotsb_num,
				struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for the rest of the devices as well, because we
			 * are sharing the IOTSB.  So in case of failure
			 * simply return with the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return -EINVAL;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return -EINVAL;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

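/* These ops are installed as the global sparc64 dma_ops from
 * pci_sun4v_probe() once the PCI hypervisor API group has been
 * negotiated.
 */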
static const struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.dma_supported		= dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0, i),
							      1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported.  Each range is a pair
	 * of {base, size}.  ranges[0] and ranges[1] are 32-bit address space,
	 * while ranges[2] and ranges[3] are 64-bit space.  We want to use the
	 * 64-bit address ranges to support 64-bit addressing.  Because the
	 * 'size' of ranges[2] and ranges[3] is the same, we can select either
	 * of them for mapping.  However, since that 'size' is too large for
	 * the OS to allocate an IOTSB for, we use a fixed size of 32G
	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
	 * to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

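/* Set up the legacy IOMMU: size the 32-bit DVMA window from the
 * "virtual-dma" OF property (2GB at 0x80000000 by default), allocate the
 * allocation bitmap, and import any mappings already installed by OBP.
 */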
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;	/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

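/* The queue helpers below are exported to the common sparc64 MSI layer
 * through pci_sun4v_msiq_ops (see pci_sun4v_msi_init() further down).
 */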
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails it's not a complete failure; we can still
	 * continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);