// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
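
/*
 * Illustrative sketch (not part of the driver): per the __sg_page_iter_start
 * convention documented above, the iterator does not point at a valid entry
 * until it has been advanced once with vmw_piter_next(). A typical walk over
 * all DMA addresses of a vmw_sg_table, assuming @vsgt has already been
 * mapped, therefore looks roughly like:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		(use addr for one page of the buffer)
 *	}
 *
 * vmw_ttm_map_dma() below uses this exact pattern to count contiguous
 * regions.
 */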

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
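
/*
 * Illustrative sketch (not part of the driver): as noted above, the device
 * addresses behind the returned table are only valid while the buffer
 * object is reserved or pinned. A hypothetical caller would therefore
 * bracket its use roughly like this:
 *
 *	int ret = ttm_bo_reserve(bo, true, false, NULL);
 *
 *	if (ret == 0) {
 *		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *
 *		(walk vsgt with a struct vmw_piter here)
 *		ttm_bo_unreserve(bo);
 *	}
 */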

static int vmw_ttm_bind(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}

static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Unused.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_bind = &vmw_ttm_bind,
	.ttm_tt_unbind = &vmw_ttm_unbind,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &bo);

	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}
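
/*
 * Illustrative sketch (not part of the driver): a hypothetical in-kernel
 * caller of vmw_bo_create_and_populate() receives a populated,
 * non-evictable system-memory buffer object whose vmw_sg_table has already
 * been set up by vmw_ttm_map_dma(), and is responsible for dropping its
 * reference when done, e.g.:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *
 *	if (ret == 0) {
 *		(use the buffer object, e.g. via vmw_bo_sg_table(bo))
 *		ttm_bo_put(bo);
 *	}
 */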