/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
        return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
                                  zdev->iommu_pages * PAGE_SIZE);
}

unsigned long *dma_alloc_cpu_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID;
        return table;
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
        unsigned long *sto;

        if (reg_entry_isvalid(*entry))
                sto = get_rt_sto(*entry);
        else {
                sto = dma_alloc_cpu_table();
                if (!sto)
                        return NULL;

                set_rt_sto(entry, sto);
                validate_rt_entry(entry);
                entry_clr_protected(entry);
        }
        return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
        unsigned long *pto;

        if (reg_entry_isvalid(*entry))
                pto = get_st_pto(*entry);
        else {
                pto = dma_alloc_page_table();
                if (!pto)
                        return NULL;
                set_st_pto(entry, pto);
                validate_st_entry(entry);
                entry_clr_protected(entry);
        }
        return pto;
}

unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx]);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx]);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}

void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
        if (flags & ZPCI_PTE_INVALID) {
                invalidate_pt_entry(entry);
        } else {
                set_pt_pfaa(entry, page_addr);
                validate_pt_entry(entry);
        }

        if (flags & ZPCI_TABLE_PROTECTED)
                entry_set_protected(entry);
        else
                entry_clr_protected(entry);
}
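
/*
 * Map or unmap a contiguous range of pages in the CPU-side translation
 * tables. Each page's entry is looked up (allocating intermediate tables
 * on demand) and updated according to flags; the device TLB is then
 * refreshed via RPCIT unless the refresh can be skipped (see below). If
 * a map request fails part-way, the entries validated so far are rolled
 * back.
 */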
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                            dma_addr_t dma_addr, size_t size, int flags)
{
        unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        u8 *page_addr = (u8 *) (pa & PAGE_MASK);
        dma_addr_t start_dma_addr = dma_addr;
        unsigned long irq_flags;
        unsigned long *entry;
        int i, rc = 0;

        if (!nr_pages)
                return -EINVAL;

        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
        if (!zdev->dma_table) {
                rc = -EINVAL;
                goto no_refresh;
        }

        for (i = 0; i < nr_pages; i++) {
                entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
                if (!entry) {
                        rc = -ENOMEM;
                        goto undo_cpu_trans;
                }
                dma_update_cpu_trans(entry, page_addr, flags);
                page_addr += PAGE_SIZE;
                dma_addr += PAGE_SIZE;
        }

        /*
         * With zdev->tlb_refresh == 0, rpcit is not required to establish new
         * translations when previously invalid translation-table entries are
         * validated. With lazy unmap, it also is skipped for previously valid
         * entries, but a global rpcit is then required before any address can
         * be re-used, i.e. after each iommu bitmap wrap-around.
         */
        if (!zdev->tlb_refresh &&
            (!s390_iommu_strict ||
             ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
                goto no_refresh;

        rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
                                nr_pages * PAGE_SIZE);
undo_cpu_trans:
        if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
                flags = ZPCI_PTE_INVALID;
                while (i-- > 0) {
                        page_addr -= PAGE_SIZE;
                        dma_addr -= PAGE_SIZE;
                        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
                        if (!entry)
                                break;
                        dma_update_cpu_trans(entry, page_addr, flags);
                }
        }

no_refresh:
        spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
        return rc;
}

void dma_free_seg_table(unsigned long entry)
{
        unsigned long *sto = get_rt_sto(entry);
        int sx;

        for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
                if (reg_entry_isvalid(sto[sx]))
                        dma_free_page_table(get_st_pto(sto[sx]));

        dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
        int rtx;

        if (!table)
                return;

        for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                if (reg_entry_isvalid(table[rtx]))
                        dma_free_seg_table(table[rtx]);

        dma_free_cpu_table(table);
}
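
/*
 * DMA address space management: the iommu bitmap records which pages of
 * the device's DMA address range are in use. __dma_alloc_iommu() searches
 * for a free run of pages starting at the given offset; dma_alloc_iommu()
 * first tries from zdev->next_bit, wraps around to 0 on failure and, with
 * lazy unmapping, issues a global TLB refresh after such a wrap-around.
 */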
static unsigned long __dma_alloc_iommu(struct device *dev,
                                       unsigned long start, int size)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long boundary_size;

        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;
        return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                start, size, zdev->start_dma >> PAGE_SHIFT,
                                boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct device *dev, int size)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long offset, flags;
        int wrap = 0;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
        if (offset == -1) {
                /* wrap-around */
                offset = __dma_alloc_iommu(dev, 0, size);
                wrap = 1;
        }

        if (offset != -1) {
                zdev->next_bit = offset + size;
                if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
                        /* global flush after wrap-around with lazy unmap */
                        zpci_refresh_global(zdev);
        }
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
        return offset;
}

static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        if (!zdev->iommu_bitmap)
                goto out;
        bitmap_clear(zdev->iommu_bitmap, offset, size);
        /*
         * Lazy flush for unmap: need to move next_bit to avoid address re-use
         * until wrap-around.
         */
        if (!s390_iommu_strict && offset >= zdev->next_bit)
                zdev->next_bit = offset + size;
out:
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
        struct {
                unsigned long rc;
                unsigned long addr;
        } __packed data = {rc, addr};

        zpci_err_hex(&data, sizeof(data));
}

static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long nr_pages, iommu_page_index;
        unsigned long pa = page_to_phys(page) + offset;
        int flags = ZPCI_PTE_VALID;
        dma_addr_t dma_addr;
        int ret;

        /* This rounds up number of pages based on size and offset */
        nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
        iommu_page_index = dma_alloc_iommu(dev, nr_pages);
        if (iommu_page_index == -1) {
                ret = -ENOSPC;
                goto out_err;
        }

        /* Use rounded up size */
        size = nr_pages * PAGE_SIZE;

        dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
        if (dma_addr + size > zdev->end_dma) {
                ret = -ERANGE;
                goto out_free;
        }

        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;

        ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
        if (ret)
                goto out_free;

        atomic64_add(nr_pages, &zdev->mapped_pages);
        return dma_addr + (offset & ~PAGE_MASK);

out_free:
        dma_free_iommu(dev, iommu_page_index, nr_pages);
out_err:
        zpci_err("map error:\n");
        zpci_err_dma(ret, pa);
        return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        unsigned long iommu_page_index;
        int npages, ret;

        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                               ZPCI_PTE_INVALID);
        if (ret) {
                zpci_err("unmap error:\n");
                zpci_err_dma(ret, dma_addr);
                return;
        }

        atomic64_add(npages, &zdev->unmapped_pages);
        iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
        dma_free_iommu(dev, iommu_page_index, npages);
}
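
/*
 * Coherent allocations: s390_dma_alloc() allocates and zeroes pages and
 * maps them bidirectionally via s390_dma_map_pages(); s390_dma_free()
 * reverses both steps. The CPU pointer handed back is derived directly
 * from page_to_phys(), relying on the kernel's 1:1 mapping of physical
 * memory.
 */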
static void *s390_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag,
                            unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
        struct page *page;
        unsigned long pa;
        dma_addr_t map;

        size = PAGE_ALIGN(size);
        page = alloc_pages(flag, get_order(size));
        if (!page)
                return NULL;

        pa = page_to_phys(page);
        memset((void *) pa, 0, size);

        map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, map)) {
                free_pages(pa, get_order(size));
                return NULL;
        }

        atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
        if (dma_handle)
                *dma_handle = map;
        return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
                          void *pa, dma_addr_t dma_handle,
                          unsigned long attrs)
{
        struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

        size = PAGE_ALIGN(size);
        atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
        s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
        free_pages((unsigned long) pa, get_order(size));
}

static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                           int nr_elements, enum dma_data_direction dir,
                           unsigned long attrs)
{
        int mapped_elements = 0;
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                struct page *page = sg_page(s);
                s->dma_address = s390_dma_map_pages(dev, page, s->offset,
                                                    s->length, dir, 0);
                if (!dma_mapping_error(dev, s->dma_address)) {
                        s->dma_length = s->length;
                        mapped_elements++;
                } else
                        goto unmap;
        }
out:
        return mapped_elements;

unmap:
        for_each_sg(sg, s, mapped_elements, i) {
                if (s->dma_address)
                        s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
                                             dir, 0);
                s->dma_address = 0;
                s->dma_length = 0;
        }
        mapped_elements = 0;
        goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                              int nr_elements, enum dma_data_direction dir,
                              unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
                                     0);
                s->dma_address = 0;
                s->dma_length = 0;
        }
}
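
/*
 * Per-device setup and teardown: zpci_dma_init_device() allocates the
 * root translation table and the iommu bitmap, sizes the usable DMA
 * range and registers the table with the hardware via zpci_register_ioat();
 * zpci_dma_exit_device() undoes all of this.
 */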
int zpci_dma_init_device(struct zpci_dev *zdev)
{
        int rc;

        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);

        spin_lock_init(&zdev->iommu_bitmap_lock);
        spin_lock_init(&zdev->dma_table_lock);

        zdev->dma_table = dma_alloc_cpu_table();
        if (!zdev->dma_table) {
                rc = -ENOMEM;
                goto out;
        }

        /*
         * Restrict the iommu bitmap size to the minimum of the following:
         * - main memory size
         * - 3-level pagetable address limit minus start_dma offset
         * - DMA address range allowed by the hardware (clp query pci fn)
         *
         * Also set zdev->end_dma to the actual end address of the usable
         * range, instead of the theoretical maximum as reported by hardware.
         */
        zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
        zdev->iommu_size = min3((u64) high_memory,
                                ZPCI_TABLE_SIZE_RT - zdev->start_dma,
                                zdev->end_dma - zdev->start_dma + 1);
        zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
        zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
        zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
        if (!zdev->iommu_bitmap) {
                rc = -ENOMEM;
                goto free_dma_table;
        }

        rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                (u64) zdev->dma_table);
        if (rc)
                goto free_bitmap;

        return 0;
free_bitmap:
        vfree(zdev->iommu_bitmap);
        zdev->iommu_bitmap = NULL;
free_dma_table:
        dma_free_cpu_table(zdev->dma_table);
        zdev->dma_table = NULL;
out:
        return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);

        zpci_unregister_ioat(zdev, 0);
        dma_cleanup_tables(zdev->dma_table);
        zdev->dma_table = NULL;
        vfree(zdev->iommu_bitmap);
        zdev->iommu_bitmap = NULL;
        zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
        dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
                                        ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
                                        0, NULL);
        if (!dma_region_table_cache)
                return -ENOMEM;

        dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
                                        ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
                                        0, NULL);
        if (!dma_page_table_cache) {
                kmem_cache_destroy(dma_region_table_cache);
                return -ENOMEM;
        }
        return 0;
}

int __init zpci_dma_init(void)
{
        return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
        kmem_cache_destroy(dma_page_table_cache);
        kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_pci_dma_ops = {
        .alloc          = s390_dma_alloc,
        .free           = s390_dma_free,
        .map_sg         = s390_dma_map_sg,
        .unmap_sg       = s390_dma_unmap_sg,
        .map_page       = s390_dma_map_pages,
        .unmap_page     = s390_dma_unmap_pages,
        /* if we support direct DMA this must be conditional */
        .is_phys        = 0,
        /* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

static int __init s390_iommu_setup(char *str)
{
        if (!strncmp(str, "strict", 6))
                s390_iommu_strict = 1;
        return 0;
}

__setup("s390_iommu=", s390_iommu_setup);