/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (--i) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}