// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_VRAM,
        .flags = 0
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_SYSTEM,
        .flags = 0
};

static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_GMR,
        .flags = 0
};

static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_MOB,
        .flags = 0
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_VRAM,
                .flags = 0
        }
};

static const struct ttm_place vmw_sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = VMW_PL_SYSTEM,
        .flags = 0
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};
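/*
 * Placement for kernel buffer objects that only need to live in the
 * device-private system domain (VMW_PL_SYSTEM); used by
 * vmw_bo_create_and_populate() below.
 */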
struct ttm_placement vmw_pt_sys_placement = {
        .num_placement = 1,
        .placement = &vmw_sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vmw_sys_placement_flags
};

static const struct ttm_place nonfixed_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_GMR,
                .flags = 0
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = VMW_PL_MOB,
                .flags = 0
        }
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct vmw_ttm_tt {
        struct ttm_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
        bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        bool ret = __vmw_piter_non_sg_next(viter);

        return __sg_page_iter_dma_next(&viter->iter) && ret;
}


static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
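 *
 * A typical walk over all mapped pages then looks like the loop in
 * vmw_ttm_map_dma() below:
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}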
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        viter->pages = vsgt->pages;
        switch (vsgt->mode) {
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->addrs = vsgt->addrs;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->drm.dev;

        dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
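 *
 * In this file, vmw_ttm_map_dma() below only calls this for the
 * vmw_dma_map_populate and vmw_dma_map_bind modes.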
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->drm.dev;

        return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages_segment(
                        &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
                        (unsigned long)vsgt->num_pages << PAGE_SHIFT,
                        dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
                if (ret)
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.orig_nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        /* Count the number of contiguous DMA address ranges in the map. */
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
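 *
 * Callers typically wrap the returned table in a struct vmw_piter (see
 * vmw_piter_start() above) to walk the device addresses.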
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

        return &vmw_tt->vsgt;
}


/*
 * vmw_ttm_bind - Make the backing pages device-visible and bind them to
 * the memory type given by @bo_mem: a GMR, a MOB, or the device-private
 * system domain (for which no binding work is needed).
 */
static int vmw_ttm_bind(struct ttm_device *bdev,
                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        int ret = 0;

        if (!bo_mem)
                return -EINVAL;

        if (vmw_be->bound)
                return 0;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                   ttm->num_pages, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                   &vmw_be->vsgt, ttm->num_pages,
                                   vmw_be->gmr_id);
                break;
        case VMW_PL_SYSTEM:
                /* Nothing to be done for a system bind */
                break;
        default:
                BUG();
        }
        vmw_be->bound = true;
        return ret;
}

/*
 * vmw_ttm_unbind - Unbind from the GMR or MOB and, in the dma_map_bind
 * mode, also tear down the DMA mappings.
 */
static void vmw_ttm_unbind(struct ttm_device *bdev,
                           struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm);

        if (!vmw_be->bound)
                return;

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        case VMW_PL_SYSTEM:
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
        vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm);

        vmw_ttm_unmap_dma(vmw_be);
        ttm_tt_fini(ttm);
        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

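/**
 * vmw_ttm_populate - Allocate backing pages for a struct ttm_tt
 *
 * @bdev: Pointer to the struct ttm_device owning the pool.
 * @ttm: Pointer to the struct ttm_tt to populate.
 * @ctx: TTM operation context.
 *
 * Allocates pages from the TTM pool and charges each page to the global
 * TTM memory accounting; on accounting failure, already-charged pages
 * are released and the pool allocation is undone.
 */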
static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        unsigned int i;
        int ret;

        /* TODO: maybe completely drop this? */
        if (ttm_tt_is_populated(ttm))
                return 0;

        ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
        if (ret)
                return ret;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
                                                PAGE_SIZE, ctx);
                if (ret)
                        goto error;
        }
        return 0;

error:
        while (i--)
                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
                                         PAGE_SIZE);
        ttm_pool_free(&bdev->pool, ttm);
        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
                               struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm);
        unsigned int i;

        vmw_ttm_unbind(bdev, ttm);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);

        for (i = 0; i < ttm->num_pages; ++i)
                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
                                         PAGE_SIZE);

        ttm_pool_free(&bdev->pool, ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                     ttm_cached);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                  ttm_cached);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                        dev_priv->vram_start;
                mem->bus.is_iomem = true;
                mem->bus.caching = ttm_cached;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 *           region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            struct ttm_resource *old_mem,
                            struct ttm_resource *new_mem)
{
        vmw_bo_move_notify(bo, new_mem);
        vmw_query_move_notify(bo, old_mem, new_mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        vmw_bo_swap_notify(bo);
        (void) ttm_bo_wait(bo, false, false);
}

static bool vmw_memtype_is_system(uint32_t mem_type)
{
        return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}

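/**
 * vmw_move - TTM move callback
 *
 * @bo: The buffer object to move.
 * @evict: Whether the move is an eviction.
 * @ctx: TTM operation context.
 * @new_mem: The resource describing the move destination.
 * @hop: Placement for a multihop intermediate; unused by this driver.
 *
 * Binds the backing pages to the new placement where needed, notifies
 * resources of the move, and then either just reassigns the resource
 * (for TT-to-TT moves) or falls back to a memcpy move.
 */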
static int vmw_move(struct ttm_buffer_object *bo,
                    bool evict,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem,
                    struct ttm_place *hop)
{
        struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
        int ret;

        if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
                ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
                if (ret)
                        return ret;
        }

        vmw_move_notify(bo, bo->resource, new_mem);

        if (old_man->use_tt && new_man->use_tt) {
                if (vmw_memtype_is_system(bo->resource->mem_type)) {
                        ttm_bo_move_null(bo, new_mem);
                        return 0;
                }
                ret = ttm_bo_wait_ctx(bo, ctx);
                if (ret)
                        goto fail;

                vmw_ttm_unbind(bo->bdev, bo->ttm);
                ttm_resource_free(bo, &bo->resource);
                ttm_bo_assign_mem(bo, new_mem);
                return 0;
        } else {
                ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (ret)
                        goto fail;
        }
        return 0;
fail:
        vmw_move_notify(bo, new_mem, bo->resource);
        return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .ttm_tt_destroy = &vmw_ttm_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = vmw_move,
        .swap_notify = vmw_swap_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

/*
 * vmw_bo_create_and_populate - Create a kernel buffer object in the
 * device-private system domain, populate it and set up its DMA mappings,
 * so that its device addresses are valid on return.
 */
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
                               unsigned long bo_size,
                               struct ttm_buffer_object **bo_p)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_buffer_object *bo;
        int ret;

        ret = vmw_bo_create_kernel(dev_priv, bo_size,
                                   &vmw_pt_sys_placement,
                                   &bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);
        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
                        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }

        ttm_bo_unreserve(bo);

        if (likely(ret == 0))
                *bo_p = bo;
        return ret;
}
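/*
 * Typical use of vmw_bo_create_and_populate() (an illustrative sketch
 * only; real callers add their own error handling and consume the
 * addresses in a caller-specific way):
 *
 *	struct ttm_buffer_object *bo;
 *
 *	if (vmw_bo_create_and_populate(dev_priv, bo_size, &bo) == 0) {
 *		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *		// ... walk device addresses with vmw_piter_start() ...
 *	}
 */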