// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = 0
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}
};

static const struct ttm_place vmw_sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_SYSTEM,
	.flags = 0
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_pt_sys_placement = {
	.num_placement = 1,
	.placement = &vmw_sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vmw_sys_placement_flags
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
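
/*
 * Note (informational, not from the original file): in each struct
 * ttm_placement above, @placement lists the memory types TTM tries first
 * when validating a buffer, while @busy_placement lists the fallback types
 * TTM retries with when the preferred placements cannot be satisfied
 * without waiting for, or evicting, other buffers.
 */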

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
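
/*
 * Example (illustrative sketch only, not part of the driver): the intended
 * usage pattern for the iterator set up above. vmw_piter_next() and
 * vmw_piter_dma_addr() are assumed to be the static inline wrappers declared
 * in vmwgfx_drv.h that dispatch through the function pointers installed by
 * vmw_piter_start(). Because of the __sg_page_iter_start convention the
 * iterator must be advanced once before the first address is read.
 */
static void __maybe_unused vmw_piter_usage_sketch(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter iter;

	vmw_piter_start(&iter, vsgt, 0);
	while (vmw_piter_next(&iter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);

		/* Hand @addr to the device, e.g. as a page-table entry. */
		(void) addr;
	}
}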

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for the current DMA mapping mode and make
 * sure the TTM pages are visible to the device. Allocate storage for the
 * device mappings. If a mapping has already been performed, indicated by
 * the storage pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	int ret = 0;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = NULL;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vsgt->sgt = &vmw_tt->sgt;
		ret = sg_alloc_table_from_pages_segment(
			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
		if (ret)
			goto out_sg_alloc_fail;

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	return ret;
}
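
/*
 * Summary (informational, derived from the code above and from
 * vmw_ttm_unbind() below):
 *
 *   vmw_dma_alloc_coherent: the TTM pages already carry coherent DMA
 *                           addresses in dma_ttm.dma_address; no sg table
 *                           is built here.
 *   vmw_dma_map_populate:   an sg table is built and DMA-mapped here and
 *                           kept until the TT is unpopulated or destroyed.
 *   vmw_dma_map_bind:       as map_populate, but the mapping is also torn
 *                           down each time the TT is unbound.
 */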

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}

static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	case VMW_PL_SYSTEM:
		/* Nothing to be done for a system bind */
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	case VMW_PL_SYSTEM:
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}
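
/*
 * Note (informational): for the GMR and MOB placements the resource
 * manager's start offset (bo_mem->start) doubles as the GMR respectively
 * MOB id that vmw_gmr_bind()/vmw_mob_bind() program into the device, while
 * VMW_PL_SYSTEM placements need no device-side binding at all.
 */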

static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unmap_dma(vmw_be);
	ttm_tt_fini(ttm);
	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	/* TODO: maybe completely drop this ? */
	if (ttm_tt_is_populated(ttm))
		return 0;

	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);

	vmw_ttm_unbind(bdev, ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	ttm_pool_free(&bdev->pool, ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached, 0);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

static bool vmw_memtype_is_system(uint32_t mem_type)
{
	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}

static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	int ret;

	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man->use_tt && new_man->use_tt) {
		if (vmw_memtype_is_system(bo->resource->mem_type)) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}
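
/*
 * Summary (informational, derived from vmw_move() above): a move into a
 * TT-backed, non-system placement binds the TT first; moves between two
 * TT-backed managers (system, GMR, MOB) need no data copy: leaving system
 * memory is a null move, otherwise the old binding is dropped and the new
 * resource assigned. Any move involving VRAM falls back to
 * ttm_bo_move_memcpy().
 */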

struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_pt_sys_placement,
				   &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}
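
/*
 * Example (illustrative sketch only, not part of the driver): how a caller,
 * for instance MOB page-table or otable setup code, might use the helper
 * above. The wrapper name below is hypothetical; the caller owns the
 * returned, already populated and DMA-mapped buffer object and must release
 * it with ttm_bo_put() when done.
 */
static int __maybe_unused vmw_example_alloc_pt_bo(struct vmw_private *dev_priv,
						  struct ttm_buffer_object **out_bo)
{
	/* One page of page-table backing, placed via vmw_pt_sys_placement. */
	return vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, out_bo);
}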