// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
 * @bo: ttm buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * a &vmw_buffer_object.
 *
 * Returns:
 * true if the object is of &vmw_buffer_object type, false if not.
 */
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
	return bo->destroy == &vmw_bo_bo_free ||
	       bo->destroy == &vmw_gem_destroy;
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
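
/*
 * Example (illustrative sketch, not driver code): pinning a buffer into
 * system memory and later releasing the pin. Here "dev_priv" and "buf"
 * stand for a valid device private and buffer object in the caller's
 * context:
 *
 *	ret = vmw_bo_pin_in_placement(dev_priv, buf, &vmw_sys_placement, true);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = vmw_bo_unpin(dev_priv, buf, true);
 */
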
/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
			? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
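
/*
 * Example (illustrative sketch): the helper above first tries the combined
 * vram+gmr placement and falls back to vram-only unless the first attempt
 * was interrupted by a signal. Afterwards, vmw_bo_get_guest_ptr() (below)
 * reports which placement was actually chosen:
 *
 *	SVGAGuestPtr ptr;
 *
 *	ret = vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	vmw_bo_get_guest_ptr(&buf->base, &ptr);
 */
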
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->resource->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < bo->resource->num_pages &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, &placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer; does not move it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
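
/*
 * Worked example for vmw_bo_get_guest_ptr() above: a buffer pinned in VRAM
 * at page 16 yields gmrId == SVGA_GMR_FRAMEBUFFER and offset ==
 * 16 << PAGE_SHIFT (0x10000 with 4 KiB pages), while a GMR-backed buffer
 * yields its GMR id directly, with offset 0.
 */
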
/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}
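
/*
 * Example (illustrative sketch): a caller that needs CPU access maps the
 * buffer while it is reserved or pinned and relies on the cached map for
 * repeated access; no explicit vmw_bo_unmap() is required, since the map
 * is torn down on move, swapout and destruction. "vbo", "data" and "size"
 * are assumed from the caller's context:
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *	if (!virtual)
 *		return -ENOMEM;
 *	memcpy(virtual, data, size);
 */
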
/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	drm_gem_object_release(&bo->base);
	kfree(vmw_bo);
}

/* default destructor */
static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pins a simple BO for in-kernel use.
 * Return: Zero on success, negative error code on failure.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	size = ALIGN(size, PAGE_SIZE);

	drm_gem_private_object_init(vdev, &bo->base, size);

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_kernel, placement, 0,
				   &ctx, NULL, NULL, vmw_bo_default_destroy);
	if (unlikely(ret))
		goto error_free;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_free:
	kfree(bo);
	return ret;
}

/**
 * vmw_bo_create - Allocate and initialize a vmw buffer object.
 *
 * @vmw: Pointer to the device private struct
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * @p_bo: The resulting buffer object.
 * Returns: Zero on success, negative error code on error.
 */
int vmw_bo_create(struct vmw_private *vmw,
		  size_t size, struct ttm_placement *placement,
		  bool interruptible, bool pin,
		  void (*bo_free)(struct ttm_buffer_object *bo),
		  struct vmw_buffer_object **p_bo)
{
	int ret;

	BUG_ON(!bo_free);

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(vmw, *p_bo, size,
			  placement, interruptible, pin,
			  bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	kfree(*p_bo);
	*p_bo = NULL;
	return ret;
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	WARN_ON_ONCE(!bo_free);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	size = ALIGN(size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device,
				   placement,
				   0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret))
		return ret;

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);

	return 0;
}
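
/*
 * Example (illustrative sketch): creating an unpinned buffer object backed
 * by VRAM or a GMR with the helpers above, and dropping the final
 * reference when done. "dev_priv" is assumed to be the caller's device
 * private:
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret = vmw_bo_create(dev_priv, PAGE_SIZE, &vmw_vram_gmr_placement,
 *				true, false, vmw_bo_bo_free, &vbo);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_bo_unreference(&vbo);
 */
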
/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file it was
 * granted through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_buffer_object *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs))
			atomic_dec(&vmw_bo->cpu_writers);

		ttm_bo_put(&vmw_bo->base);
	}

	return ret;
}
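
/*
 * Example (illustrative sketch): the grab/release pair as driven by the
 * ioctl below. A grab without drm_vmw_synccpu_allow_cs idles the buffer
 * and blocks further command submission until the matching release; with
 * drm_vmw_synccpu_dontblock set, a busy buffer fails with -EBUSY instead
 * of waiting:
 *
 *	ret = vmw_user_bo_synccpu_grab(vbo, drm_vmw_synccpu_read);
 *	if (ret)
 *		return ret;
 *	... CPU access ...
 *	ret = vmw_user_bo_synccpu_release(filp, handle, drm_vmw_synccpu_read);
 */
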
/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_bo_unreference(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	drm_gem_handle_delete(file_priv, arg->handle);
	return 0;
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * Return: Zero on success, Negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted.
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       uint32_t handle,
		       struct vmw_buffer_object **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = gem_to_vmw_bo(gobj);
	ttm_bo_get(&(*out)->base);
	drm_gem_object_put(gobj);

	return 0;
}
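
/*
 * Example (illustrative sketch): translating a user handle into a
 * refcounted buffer object pointer and dropping the reference again;
 * "filp" and "handle" come from the ioctl context:
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret = vmw_user_bo_lookup(filp, handle, &vbo);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_bo_unreference(&vbo);
 */
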
/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @filp: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a GEM handle and returns a pointer to the embedded
 * struct vmw_buffer_object without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_get_unless_zero(). Iff this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping
 * or scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
	struct vmw_buffer_object *vmw_bo;
	struct ttm_buffer_object *bo;
	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);

	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}
	vmw_bo = gem_to_vmw_bo(gobj);
	bo = ttm_bo_get_unless_zero(&vmw_bo->base);
	vmw_bo = vmw_buffer_object(bo);
	drm_gem_object_put(gobj);

	return vmw_bo;
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
						args->size, &args->handle,
						&vbo);

	return ret;
}
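
/*
 * Worked example for the pitch/size computation in vmw_dumb_create()
 * above: width == 800 and bpp == 32 give pitch == 800 * 4 == 3200 bytes,
 * and a 600-line buffer needs 3200 * 600 == 1920000 bytes, which the
 * PAGE_SIZE alignment rounds up to 1921024 bytes (469 pages) with 4 KiB
 * pages.
 */
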
/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (!bo_is_vmw(bo))
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (!bo_is_vmw(bo))
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}
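
/*
 * Worked example for the rules above: moving a buffer from VMW_PL_MOB to
 * TTM_PL_SYSTEM keeps any cached kernel map (the underlying pages are
 * unchanged) but triggers vmw_resource_unbind_list(), while a move into
 * or out of TTM_PL_VRAM always tears the cached map down.
 */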