// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_bo_mem_compat(placement, bo->resource,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
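
/*
 * Illustration only, not part of the driver: a minimal sketch of how a
 * caller inside vmwgfx might use the pinning helpers in this file. The
 * names "dev_priv" and "vbo" are assumed to be a valid struct vmw_private
 * and an already initialized struct vmw_buffer_object.
 *
 *	int ret = vmw_bo_pin_in_placement(dev_priv, vbo, &vmw_vram_placement,
 *					  true);
 *	if (ret == -ERESTARTSYS)
 *		return ret;	// Interrupted by a signal; caller may retry.
 *	if (ret)
 *		return ret;	// Validation to the requested placement failed.
 *
 *	// ... use the buffer at a stable location ...
 *
 *	(void) vmw_bo_unpin(dev_priv, vbo, false);
 */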


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->resource->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < bo->resource->num_pages &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, bo->resource,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
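
/*
 * Illustration only, not part of the driver: vmw_bo_pin_reserved() requires
 * the buffer to already be reserved, so a hypothetical caller would bracket
 * it with ttm_bo_reserve()/ttm_bo_unreserve(), e.g.:
 *
 *	struct ttm_buffer_object *bo = &vbo->base;
 *
 *	if (ttm_bo_reserve(bo, false, false, NULL) == 0) {
 *		vmw_bo_pin_reserved(vbo, true);	// pin in its current placement
 *		// ... issue commands that rely on the buffer not moving ...
 *		vmw_bo_pin_reserved(vbo, false);	// drop the pin again
 *		ttm_bo_unreserve(bo);
 *	}
 */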

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}
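
/*
 * Illustration only, not part of the driver: a sketch of the cached-map
 * pattern, assuming @vbo is pinned or reserved for the duration of the
 * access so that the returned address stays valid.
 *
 *	u32 *cpu_addr = vmw_bo_map_and_cache(vbo);
 *
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	cpu_addr[0] = 0xdeadbeef;	// CPU-side update of buffer contents
 *	// The map may be kept cached; it is torn down automatically on
 *	// move, swapout or destruction, or explicitly:
 *	vmw_bo_unmap(vbo);
 */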


/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The accounting size of the buffer object, in bytes.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	dma_resv_fini(&bo->base._resv);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 * Return: Zero on success, negative error code on failure.
 *
 * Creates and pins a simple BO for in-kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	acc_size = ttm_round_pot(sizeof(*bo));
	acc_size += ttm_round_pot(npages * sizeof(void *));
	acc_size += ttm_round_pot(sizeof(struct ttm_tt));

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		goto error_free;


	bo->base.size = size;
	dma_resv_init(&bo->base._resv);
	drm_vma_node_reset(&bo->base.vma_node);

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_device, placement, 0,
				   &ctx, NULL, NULL, NULL);
	if (unlikely(ret))
		goto error_account;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_account:
	ttm_mem_global_free(&ttm_mem_glob, acc_size);

error_free:
	kfree(bo);
	return ret;
}
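
/*
 * Illustration only, not part of the driver: a hypothetical kernel-internal
 * allocation using vmw_bo_create_kernel(). The buffer comes back pinned and
 * unreserved; the teardown shown here (unpin under reservation, then drop
 * the reference) is an assumption about how a caller would release it.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE,
 *				   &vmw_sys_placement, &bo);
 *	if (ret)
 *		return ret;
 *	// ... use the pinned buffer ...
 *	if (ttm_bo_reserve(bo, false, false, NULL) == 0) {
 *		ttm_bo_unpin(bo);
 *		ttm_bo_unreserve(bo);
 *	}
 *	ttm_bo_put(bo);
 */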

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		return ret;

	vmw_bo->base.base.size = size;
	dma_resv_init(&vmw_bo->base.base._resv);
	drm_vma_node_reset(&vmw_bo->base.base.vma_node);

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device, placement,
				   0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret)) {
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		return ret;
	}

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);
	return 0;
}


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a file-close cleanup.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where the refcounted TTM base object pointer should be
 * assigned, or NULL if it is not needed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true, false,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
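
/*
 * Illustration only, not part of the driver: a sketch of allocating a
 * user-visible buffer object and then dropping the local reference again.
 * "tfile" is assumed to be the caller's struct ttm_object_file.
 *
 *	struct vmw_buffer_object *vbo;
 *	uint32_t handle;
 *	int ret;
 *
 *	ret = vmw_user_bo_alloc(dev_priv, tfile, 4096, false,
 *				&handle, &vbo, NULL);
 *	if (ret)
 *		return ret;
 *	// "handle" can now be returned to user-space; "vbo" holds a
 *	// reference that must eventually be dropped:
 *	vmw_bo_unreference(&vbo);
 */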


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if access is granted, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
					     nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
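
/*
 * Illustration only, not part of the driver: the expected user-space pairing
 * for the DRM_VMW_SYNCCPU ioctl handled above, expressed as the argument
 * values a client would pass. Exact ioctl wrapper code (libdrm etc.) is
 * omitted and assumed.
 *
 *	struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *	arg.handle = bo_handle;
 *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *	arg.op = drm_vmw_synccpu_grab;	// idle the GPU, block submission
 *	// ... issue the ioctl, then access the mapping with the CPU ...
 *	arg.op = drm_vmw_synccpu_release;	// unblock command submission
 *	// ... issue the ioctl again with the same handle and flags ...
 */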


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
		(union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:

	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
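
/*
 * Illustration only, not part of the driver: a sketch of the lookup /
 * unreference pairing for handles arriving through an ioctl, assuming
 * "tfile" and "handle" come from the calling file's ioctl arguments.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
 *	if (ret)
 *		return ret;	// -ESRCH or -EINVAL for a bad handle
 *	// ... operate on the refcounted vbo ...
 *	vmw_bo_unreference(&vbo);
 */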

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). If this function returns successfully,
 * it must be paired with vmw_user_bo_noref_release(), and no sleeping or
 * scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
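
/*
 * Illustration only, not part of the driver: vmw_bo_fence_single() leaves
 * the buffer reserved, so a hypothetical submission path would look like:
 *
 *	ret = ttm_bo_reserve(bo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	// ... emit device commands that reference the buffer ...
 *	vmw_bo_fence_single(bo, NULL);	// NULL: insert a new fence
 *	ttm_bo_unreserve(bo);		// unreserving is the caller's job
 */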


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	return ret;
}


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}