// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
 * @bo: ttm buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * a &vmw_buffer_object.
 *
 * Returns:
 * true if the object is of &vmw_buffer_object type, false if not.
 */
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
	return bo->destroy == &vmw_bo_bo_free ||
	       bo->destroy == &vmw_gem_destroy;
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, placement) ?
			0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
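
/*
 * A minimal usage sketch for vmw_bo_pin_in_placement() (hypothetical
 * caller, not part of this file), assuming the caller already holds a
 * reference to @buf:
 *
 *	ret = vmw_bo_pin_in_placement(dev_priv, buf, &vmw_sys_placement,
 *				      true);
 *	if (ret)
 *		return ret;
 *	... the buffer is now pinned in the requested placement ...
 *	ret = vmw_bo_unpin(dev_priv, buf, true);
 */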

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
			? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	/* Fall back to a VRAM-only placement unless we were interrupted. */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	place = vmw_vram_placement.placement[0];
	place.lpfn = PFN_UP(bo->resource->size);
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, &placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer. This function does not move
 * the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/* No-op if the buffer is already in the requested pin state. */
	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	/* Revalidate against the current placement so the buffer stays put. */
	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
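
/*
 * A minimal usage sketch for vmw_bo_pin_reserved() (hypothetical caller,
 * not part of this file); the buffer object must be reserved across the
 * call, mirroring what vmw_bo_unpin() above does:
 *
 *	ret = ttm_bo_reserve(&vbo->base, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	ttm_bo_unreserve(&vbo->base);
 */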

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	drm_gem_object_release(&bo->base);
	kfree(vmw_bo);
}

/* default destructor */
static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pins a simple BO for in-kernel use.
 * Return: Zero on success, negative error code on failure.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	size = ALIGN(size, PAGE_SIZE);

	drm_gem_private_object_init(vdev, &bo->base, size);

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
				   placement, 0, &ctx, NULL, NULL,
				   vmw_bo_default_destroy);
	if (unlikely(ret))
		goto error_free;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_free:
	kfree(bo);
	return ret;
}
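
/**
 * vmw_bo_create - Allocate and initialize a struct vmw_buffer_object.
 *
 * @vmw: Pointer to the device private struct
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor. Must not be NULL.
 * @p_bo: Assigned the new buffer object on success, NULL on failure.
 * Return: Zero on success, negative error code on error. On error the
 * buffer object is freed by vmw_bo_init().
 */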
int vmw_bo_create(struct vmw_private *vmw,
		  size_t size, struct ttm_placement *placement,
		  bool interruptible, bool pin,
		  void (*bo_free)(struct ttm_buffer_object *bo),
		  struct vmw_buffer_object **p_bo)
{
	int ret;

	BUG_ON(!bo_free);

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init will delete the *p_bo object if it fails
	 */
	ret = vmw_bo_init(vmw, *p_bo, size,
			  placement, interruptible, pin,
			  bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	WARN_ON_ONCE(!bo_free);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	size = ALIGN(size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
				   placement, 0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret))
		return ret;

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);

	return 0;
}
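
/*
 * A minimal creation sketch for vmw_bo_create() (hypothetical caller, not
 * part of this file), pairing it with the destructor defined above:
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
 *			    true, false, vmw_bo_bo_free, &vbo);
 *	if (ret)
 *		return ret;
 *	... use vbo ...
 *	vmw_bo_unreference(&vbo);
 */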

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file that
 * performed it is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_buffer_object *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs))
			atomic_dec(&vmw_bo->cpu_writers);

		/* Drop both references taken by vmw_user_bo_lookup(). */
		ttm_bo_put(&vmw_bo->base);
		drm_gem_object_put(&vmw_bo->base.base);
	}

	return ret;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		/*
		 * Drop both references taken by vmw_user_bo_lookup(). The
		 * gem reference must be dropped first, since
		 * vmw_bo_unreference() clears @vbo.
		 */
		drm_gem_object_put(&vbo->base.base);
		vmw_bo_unreference(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	drm_gem_handle_delete(file_priv, arg->handle);
	return 0;
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * Return: Zero on success, Negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem)
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       uint32_t handle,
		       struct vmw_buffer_object **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = gem_to_vmw_bo(gobj);
	ttm_bo_get(&(*out)->base);

	return 0;
}
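
/*
 * A minimal lookup sketch for vmw_user_bo_lookup() (hypothetical caller,
 * not part of this file). The lookup takes both a ttm and a gem
 * reference, and both must be dropped when done:
 *
 *	ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
 *	if (ret)
 *		return ret;
 *	... use vbo ...
 *	drm_gem_object_put(&vbo->base.base);
 *	vmw_bo_unreference(&vbo);
 */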

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int ret;

	switch (cpp) {
	case 1: /* DRM_FORMAT_C8 */
	case 2: /* DRM_FORMAT_RGB565 */
	case 4: /* DRM_FORMAT_XRGB8888 */
		break;
	default:
		/*
		 * Dumb buffers don't allow anything else.
		 * This is tested via IGT's dumb_buffers
		 */
		return -EINVAL;
	}

	args->pitch = args->width * cpp;
	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
						args->size, &args->handle,
						&vbo);
	if (!ret)
		/* drop reference from allocate - handle holds it now */
		drm_gem_object_put(&vbo->base.base);
	return ret;
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (!bo_is_vmw(bo))
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (!bo_is_vmw(bo))
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}