// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}
};
struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
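/*
 * Usage sketch (not part of the driver): these placement tables are handed
 * to TTM when (re)validating a buffer object. For instance, a caller
 * holding a reservation on a buffer object "bo" (hypothetical variable)
 * could move it to system memory with:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false,
 *					 .no_wait_gpu = false };
 *	int ret = ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 *
 * TTM tries the entries in .placement first and falls back to
 * .busy_placement under memory pressure.
 */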
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
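/*
 * Example (sketch): the canonical way to walk a struct vmw_sg_table with
 * the iterator above, mirroring what vmw_ttm_map_dma() does further down
 * in this file:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		... use addr ...
 *	}
 */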
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
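/*
 * Sketch (not used by the driver): if CPU writes to the mapped pages were
 * ever needed while the mapping is live, the DMA API would require
 * bracketing them with sync calls on the same sg_table, e.g.:
 *
 *	dma_sync_sgtable_for_cpu(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL);
 *	... CPU access to the pages ...
 *	dma_sync_sgtable_for_device(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL);
 *
 * This driver instead relies on those syncs being no-ops (or at most a
 * write buffer flush) on the platforms it supports, as noted above.
 */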
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->dev->dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
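/*
 * Example (sketch): with the buffer object reserved (or pinned), a caller
 * such as the GMR or MOB binding code can look up the backing addresses:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter))
 *		write_pt_entry(vmw_piter_dma_addr(&iter));
 *
 * where write_pt_entry() is a placeholder for whatever consumes the
 * addresses.
 */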

static int vmw_ttm_bind(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Unused.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_bind = &vmw_ttm_bind,
	.ttm_tt_unbind = &vmw_ttm_unbind,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &bo);

	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}
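/*
 * Example (sketch, error handling trimmed): a caller elsewhere in the
 * driver would use the helper above roughly like this, with "dev_priv"
 * being a valid struct vmw_private:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *	if (ret)
 *		return ret;
 *	... use the populated, device-mappable buffer ...
 *	ttm_bo_put(bo);
 */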