/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime: The TTM prime object handling user-space visibility and sharing.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 * @master: Master of the creating client. Used for security checks.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
54 * 55 */ 56 struct vmw_surface_offset { 57 uint32_t face; 58 uint32_t mip; 59 uint32_t bo_offset; 60 }; 61 62 static void vmw_user_surface_free(struct vmw_resource *res); 63 static struct vmw_resource * 64 vmw_user_surface_base_to_res(struct ttm_base_object *base); 65 static int vmw_legacy_srf_bind(struct vmw_resource *res, 66 struct ttm_validate_buffer *val_buf); 67 static int vmw_legacy_srf_unbind(struct vmw_resource *res, 68 bool readback, 69 struct ttm_validate_buffer *val_buf); 70 static int vmw_legacy_srf_create(struct vmw_resource *res); 71 static int vmw_legacy_srf_destroy(struct vmw_resource *res); 72 static int vmw_gb_surface_create(struct vmw_resource *res); 73 static int vmw_gb_surface_bind(struct vmw_resource *res, 74 struct ttm_validate_buffer *val_buf); 75 static int vmw_gb_surface_unbind(struct vmw_resource *res, 76 bool readback, 77 struct ttm_validate_buffer *val_buf); 78 static int vmw_gb_surface_destroy(struct vmw_resource *res); 79 80 81 static const struct vmw_user_resource_conv user_surface_conv = { 82 .object_type = VMW_RES_SURFACE, 83 .base_obj_to_res = vmw_user_surface_base_to_res, 84 .res_free = vmw_user_surface_free 85 }; 86 87 const struct vmw_user_resource_conv *user_surface_converter = 88 &user_surface_conv; 89 90 91 static uint64_t vmw_user_surface_size; 92 93 static const struct vmw_res_func vmw_legacy_surface_func = { 94 .res_type = vmw_res_surface, 95 .needs_backup = false, 96 .may_evict = true, 97 .type_name = "legacy surfaces", 98 .backup_placement = &vmw_srf_placement, 99 .create = &vmw_legacy_srf_create, 100 .destroy = &vmw_legacy_srf_destroy, 101 .bind = &vmw_legacy_srf_bind, 102 .unbind = &vmw_legacy_srf_unbind 103 }; 104 105 static const struct vmw_res_func vmw_gb_surface_func = { 106 .res_type = vmw_res_surface, 107 .needs_backup = true, 108 .may_evict = true, 109 .type_name = "guest backed surfaces", 110 .backup_placement = &vmw_mob_placement, 111 .create = vmw_gb_surface_create, 112 .destroy = vmw_gb_surface_destroy, 113 .bind = vmw_gb_surface_bind, 114 .unbind = vmw_gb_surface_unbind 115 }; 116 117 /** 118 * struct vmw_surface_dma - SVGA3D DMA command 119 */ 120 struct vmw_surface_dma { 121 SVGA3dCmdHeader header; 122 SVGA3dCmdSurfaceDMA body; 123 SVGA3dCopyBox cb; 124 SVGA3dCmdSurfaceDMASuffix suffix; 125 }; 126 127 /** 128 * struct vmw_surface_define - SVGA3D Surface Define command 129 */ 130 struct vmw_surface_define { 131 SVGA3dCmdHeader header; 132 SVGA3dCmdDefineSurface body; 133 }; 134 135 /** 136 * struct vmw_surface_destroy - SVGA3D Surface Destroy command 137 */ 138 struct vmw_surface_destroy { 139 SVGA3dCmdHeader header; 140 SVGA3dCmdDestroySurface body; 141 }; 142 143 144 /** 145 * vmw_surface_dma_size - Compute fifo size for a dma command. 146 * 147 * @srf: Pointer to a struct vmw_surface 148 * 149 * Computes the required size for a surface dma command for backup or 150 * restoration of the surface represented by @srf. 151 */ 152 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) 153 { 154 return srf->num_sizes * sizeof(struct vmw_surface_dma); 155 } 156 157 158 /** 159 * vmw_surface_define_size - Compute fifo size for a surface define command. 160 * 161 * @srf: Pointer to a struct vmw_surface 162 * 163 * Computes the required size for a surface define command for the definition 164 * of the surface represented by @srf. 
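 * The define command body is followed in the FIFO by one SVGA3dSize
 * entry per mip level, summed over all faces.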
165 */ 166 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) 167 { 168 return sizeof(struct vmw_surface_define) + srf->num_sizes * 169 sizeof(SVGA3dSize); 170 } 171 172 173 /** 174 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. 175 * 176 * Computes the required size for a surface destroy command for the destruction 177 * of a hw surface. 178 */ 179 static inline uint32_t vmw_surface_destroy_size(void) 180 { 181 return sizeof(struct vmw_surface_destroy); 182 } 183 184 /** 185 * vmw_surface_destroy_encode - Encode a surface_destroy command. 186 * 187 * @id: The surface id 188 * @cmd_space: Pointer to memory area in which the commands should be encoded. 189 */ 190 static void vmw_surface_destroy_encode(uint32_t id, 191 void *cmd_space) 192 { 193 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) 194 cmd_space; 195 196 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; 197 cmd->header.size = sizeof(cmd->body); 198 cmd->body.sid = id; 199 } 200 201 /** 202 * vmw_surface_define_encode - Encode a surface_define command. 203 * 204 * @srf: Pointer to a struct vmw_surface object. 205 * @cmd_space: Pointer to memory area in which the commands should be encoded. 206 */ 207 static void vmw_surface_define_encode(const struct vmw_surface *srf, 208 void *cmd_space) 209 { 210 struct vmw_surface_define *cmd = (struct vmw_surface_define *) 211 cmd_space; 212 struct drm_vmw_size *src_size; 213 SVGA3dSize *cmd_size; 214 uint32_t cmd_len; 215 int i; 216 217 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); 218 219 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; 220 cmd->header.size = cmd_len; 221 cmd->body.sid = srf->res.id; 222 cmd->body.surfaceFlags = srf->flags; 223 cmd->body.format = cpu_to_le32(srf->format); 224 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 225 cmd->body.face[i].numMipLevels = srf->mip_levels[i]; 226 227 cmd += 1; 228 cmd_size = (SVGA3dSize *) cmd; 229 src_size = srf->sizes; 230 231 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { 232 cmd_size->width = src_size->width; 233 cmd_size->height = src_size->height; 234 cmd_size->depth = src_size->depth; 235 } 236 } 237 238 /** 239 * vmw_surface_dma_encode - Encode a surface_dma command. 240 * 241 * @srf: Pointer to a struct vmw_surface object. 242 * @cmd_space: Pointer to memory area in which the commands should be encoded. 243 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents 244 * should be placed or read from. 245 * @to_surface: Boolean whether to DMA to the surface or from the surface. 
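 *
 * One struct vmw_surface_dma command (header, body, copy box and suffix)
 * is encoded per mip level over all faces, so @cmd_space must hold at
 * least vmw_surface_dma_size(@srf) bytes.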
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}

/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Use used_memory_size_atomic, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and encode a surface
 * define command into the FIFO.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
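 *
 * While the surface has a hardware id, its backup size is accounted
 * against dev_priv->memory_size through dev_priv->used_memory_size.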
358 */ 359 static int vmw_legacy_srf_create(struct vmw_resource *res) 360 { 361 struct vmw_private *dev_priv = res->dev_priv; 362 struct vmw_surface *srf; 363 uint32_t submit_size; 364 uint8_t *cmd; 365 int ret; 366 367 if (likely(res->id != -1)) 368 return 0; 369 370 srf = vmw_res_to_srf(res); 371 if (unlikely(dev_priv->used_memory_size + res->backup_size >= 372 dev_priv->memory_size)) 373 return -EBUSY; 374 375 /* 376 * Alloc id for the resource. 377 */ 378 379 ret = vmw_resource_alloc_id(res); 380 if (unlikely(ret != 0)) { 381 DRM_ERROR("Failed to allocate a surface id.\n"); 382 goto out_no_id; 383 } 384 385 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { 386 ret = -EBUSY; 387 goto out_no_fifo; 388 } 389 390 /* 391 * Encode surface define- commands. 392 */ 393 394 submit_size = vmw_surface_define_size(srf); 395 cmd = vmw_fifo_reserve(dev_priv, submit_size); 396 if (unlikely(cmd == NULL)) { 397 DRM_ERROR("Failed reserving FIFO space for surface " 398 "creation.\n"); 399 ret = -ENOMEM; 400 goto out_no_fifo; 401 } 402 403 vmw_surface_define_encode(srf, cmd); 404 vmw_fifo_commit(dev_priv, submit_size); 405 /* 406 * Surface memory usage accounting. 407 */ 408 409 dev_priv->used_memory_size += res->backup_size; 410 return 0; 411 412 out_no_fifo: 413 vmw_resource_release_id(res); 414 out_no_id: 415 return ret; 416 } 417 418 /** 419 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. 420 * 421 * @res: Pointer to a struct vmw_res embedded in a struct 422 * vmw_surface. 423 * @val_buf: Pointer to a struct ttm_validate_buffer containing 424 * information about the backup buffer. 425 * @bind: Boolean wether to DMA to the surface. 426 * 427 * Transfer backup data to or from a legacy surface as part of the 428 * validation process. 429 * May return other errors if the kernel is out of guest resources. 430 * The backup buffer will be fenced or idle upon successful completion, 431 * and if the surface needs persistent backup storage, the backup buffer 432 * will also be returned reserved iff @bind is true. 433 */ 434 static int vmw_legacy_srf_dma(struct vmw_resource *res, 435 struct ttm_validate_buffer *val_buf, 436 bool bind) 437 { 438 SVGAGuestPtr ptr; 439 struct vmw_fence_obj *fence; 440 uint32_t submit_size; 441 struct vmw_surface *srf = vmw_res_to_srf(res); 442 uint8_t *cmd; 443 struct vmw_private *dev_priv = res->dev_priv; 444 445 BUG_ON(val_buf->bo == NULL); 446 447 submit_size = vmw_surface_dma_size(srf); 448 cmd = vmw_fifo_reserve(dev_priv, submit_size); 449 if (unlikely(cmd == NULL)) { 450 DRM_ERROR("Failed reserving FIFO space for surface " 451 "DMA.\n"); 452 return -ENOMEM; 453 } 454 vmw_bo_get_guest_ptr(val_buf->bo, &ptr); 455 vmw_surface_dma_encode(srf, cmd, &ptr, bind); 456 457 vmw_fifo_commit(dev_priv, submit_size); 458 459 /* 460 * Create a fence object and fence the backup buffer. 461 */ 462 463 (void) vmw_execbuf_fence_commands(NULL, dev_priv, 464 &fence, NULL); 465 466 vmw_fence_single_bo(val_buf->bo, fence); 467 468 if (likely(fence != NULL)) 469 vmw_fence_obj_unreference(&fence); 470 471 return 0; 472 } 473 474 /** 475 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the 476 * surface validation process. 477 * 478 * @res: Pointer to a struct vmw_res embedded in a struct 479 * vmw_surface. 480 * @val_buf: Pointer to a struct ttm_validate_buffer containing 481 * information about the backup buffer. 482 * 483 * This function will copy backup data to the surface if the 484 * backup buffer is dirty. 
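 *
 * Returns 0 if no transfer was needed or on success, or a negative error
 * code propagated from vmw_legacy_srf_dma().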
485 */ 486 static int vmw_legacy_srf_bind(struct vmw_resource *res, 487 struct ttm_validate_buffer *val_buf) 488 { 489 if (!res->backup_dirty) 490 return 0; 491 492 return vmw_legacy_srf_dma(res, val_buf, true); 493 } 494 495 496 /** 497 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the 498 * surface eviction process. 499 * 500 * @res: Pointer to a struct vmw_res embedded in a struct 501 * vmw_surface. 502 * @val_buf: Pointer to a struct ttm_validate_buffer containing 503 * information about the backup buffer. 504 * 505 * This function will copy backup data from the surface. 506 */ 507 static int vmw_legacy_srf_unbind(struct vmw_resource *res, 508 bool readback, 509 struct ttm_validate_buffer *val_buf) 510 { 511 if (unlikely(readback)) 512 return vmw_legacy_srf_dma(res, val_buf, false); 513 return 0; 514 } 515 516 /** 517 * vmw_legacy_srf_destroy - Destroy a device surface as part of a 518 * resource eviction process. 519 * 520 * @res: Pointer to a struct vmw_res embedded in a struct 521 * vmw_surface. 522 */ 523 static int vmw_legacy_srf_destroy(struct vmw_resource *res) 524 { 525 struct vmw_private *dev_priv = res->dev_priv; 526 uint32_t submit_size; 527 uint8_t *cmd; 528 529 BUG_ON(res->id == -1); 530 531 /* 532 * Encode the dma- and surface destroy commands. 533 */ 534 535 submit_size = vmw_surface_destroy_size(); 536 cmd = vmw_fifo_reserve(dev_priv, submit_size); 537 if (unlikely(cmd == NULL)) { 538 DRM_ERROR("Failed reserving FIFO space for surface " 539 "eviction.\n"); 540 return -ENOMEM; 541 } 542 543 vmw_surface_destroy_encode(res->id, cmd); 544 vmw_fifo_commit(dev_priv, submit_size); 545 546 /* 547 * Surface memory usage accounting. 548 */ 549 550 dev_priv->used_memory_size -= res->backup_size; 551 552 /* 553 * Release the surface ID. 554 */ 555 556 vmw_resource_release_id(res); 557 558 return 0; 559 } 560 561 562 /** 563 * vmw_surface_init - initialize a struct vmw_surface 564 * 565 * @dev_priv: Pointer to a device private struct. 566 * @srf: Pointer to the struct vmw_surface to initialize. 567 * @res_free: Pointer to a resource destructor used to free 568 * the object. 569 */ 570 static int vmw_surface_init(struct vmw_private *dev_priv, 571 struct vmw_surface *srf, 572 void (*res_free) (struct vmw_resource *res)) 573 { 574 int ret; 575 struct vmw_resource *res = &srf->res; 576 577 BUG_ON(res_free == NULL); 578 if (!dev_priv->has_mob) 579 (void) vmw_3d_resource_inc(dev_priv, false); 580 ret = vmw_resource_init(dev_priv, res, true, res_free, 581 (dev_priv->has_mob) ? &vmw_gb_surface_func : 582 &vmw_legacy_surface_func); 583 584 if (unlikely(ret != 0)) { 585 if (!dev_priv->has_mob) 586 vmw_3d_resource_dec(dev_priv, false); 587 res_free(res); 588 return ret; 589 } 590 591 /* 592 * The surface won't be visible to hardware until a 593 * surface validate. 594 */ 595 596 vmw_resource_activate(res, vmw_hw_surface_destroy); 597 return ret; 598 } 599 600 /** 601 * vmw_user_surface_base_to_res - TTM base object to resource converter for 602 * user visible surfaces 603 * 604 * @base: Pointer to a TTM base object 605 * 606 * Returns the struct vmw_resource embedded in a struct vmw_surface 607 * for the user-visible object identified by the TTM base object @base. 
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
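 *
 * Creates a surface from the format, sizes and mip levels supplied by
 * user-space and returns the new surface id (sid) in the ioctl reply.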
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS || num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size =
cur_bo_offset; 804 if (srf->scanout && 805 srf->num_sizes == 1 && 806 srf->sizes[0].width == 64 && 807 srf->sizes[0].height == 64 && 808 srf->format == SVGA3D_A8R8G8B8) { 809 810 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); 811 /* clear the image */ 812 if (srf->snooper.image) { 813 memset(srf->snooper.image, 0x00, 64 * 64 * 4); 814 } else { 815 DRM_ERROR("Failed to allocate cursor_image\n"); 816 ret = -ENOMEM; 817 goto out_no_copy; 818 } 819 } else { 820 srf->snooper.image = NULL; 821 } 822 srf->snooper.crtc = NULL; 823 824 user_srf->prime.base.shareable = false; 825 user_srf->prime.base.tfile = NULL; 826 if (drm_is_primary_client(file_priv)) 827 user_srf->master = drm_master_get(file_priv->master); 828 829 /** 830 * From this point, the generic resource management functions 831 * destroy the object on failure. 832 */ 833 834 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); 835 if (unlikely(ret != 0)) 836 goto out_unlock; 837 838 /* 839 * A gb-aware client referencing a shared surface will 840 * expect a backup buffer to be present. 841 */ 842 if (dev_priv->has_mob && req->shareable) { 843 uint32_t backup_handle; 844 845 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 846 res->backup_size, 847 true, 848 &backup_handle, 849 &res->backup); 850 if (unlikely(ret != 0)) { 851 vmw_resource_unreference(&res); 852 goto out_unlock; 853 } 854 } 855 856 tmp = vmw_resource_reference(&srf->res); 857 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 858 req->shareable, VMW_RES_SURFACE, 859 &vmw_user_surface_base_release, NULL); 860 861 if (unlikely(ret != 0)) { 862 vmw_resource_unreference(&tmp); 863 vmw_resource_unreference(&res); 864 goto out_unlock; 865 } 866 867 rep->sid = user_srf->prime.base.hash.key; 868 vmw_resource_unreference(&res); 869 870 ttm_read_unlock(&dev_priv->reservation_sem); 871 return 0; 872 out_no_copy: 873 kfree(srf->offsets); 874 out_no_offsets: 875 kfree(srf->sizes); 876 out_no_sizes: 877 ttm_prime_object_kfree(user_srf, prime); 878 out_no_user_srf: 879 ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 880 out_unlock: 881 ttm_read_unlock(&dev_priv->reservation_sem); 882 return ret; 883 } 884 885 886 static int 887 vmw_surface_handle_reference(struct vmw_private *dev_priv, 888 struct drm_file *file_priv, 889 uint32_t u_handle, 890 enum drm_vmw_handle_type handle_type, 891 struct ttm_base_object **base_p) 892 { 893 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 894 struct vmw_user_surface *user_srf; 895 uint32_t handle; 896 struct ttm_base_object *base; 897 int ret; 898 899 if (handle_type == DRM_VMW_HANDLE_PRIME) { 900 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 901 if (unlikely(ret != 0)) 902 return ret; 903 } else { 904 if (unlikely(drm_is_render_client(file_priv))) { 905 DRM_ERROR("Render client refused legacy " 906 "surface reference.\n"); 907 return -EACCES; 908 } 909 handle = u_handle; 910 } 911 912 ret = -EINVAL; 913 base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); 914 if (unlikely(base == NULL)) { 915 DRM_ERROR("Could not find surface to reference.\n"); 916 goto out_no_lookup; 917 } 918 919 if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) { 920 DRM_ERROR("Referenced object is not a surface.\n"); 921 goto out_bad_resource; 922 } 923 924 if (handle_type != DRM_VMW_HANDLE_PRIME) { 925 user_srf = container_of(base, struct vmw_user_surface, 926 prime.base); 927 928 /* 929 * Make sure the surface creator has the same 930 * authenticating master. 
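		 * This restricts legacy surface references to clients under
		 * the same master; other clients must use prime fds instead.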
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master) {
			DRM_ERROR("Trying to reference surface outside of"
				  " master domain.\n");
			ret = -EACCES;
			goto out_bad_resource;
		}

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Encode a DEFINE_GB_SURFACE command and create
 * the device surface for a guest-backed surface resource.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
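 *
 * Called during resource validation when the surface doesn't yet have a
 * device id; allocates an id and emits an SVGA_3D_CMD_DEFINE_GB_SURFACE
 * command.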
1016 */ 1017 static int vmw_gb_surface_create(struct vmw_resource *res) 1018 { 1019 struct vmw_private *dev_priv = res->dev_priv; 1020 struct vmw_surface *srf = vmw_res_to_srf(res); 1021 uint32_t cmd_len, submit_len; 1022 int ret; 1023 struct { 1024 SVGA3dCmdHeader header; 1025 SVGA3dCmdDefineGBSurface body; 1026 } *cmd; 1027 1028 if (likely(res->id != -1)) 1029 return 0; 1030 1031 (void) vmw_3d_resource_inc(dev_priv, false); 1032 ret = vmw_resource_alloc_id(res); 1033 if (unlikely(ret != 0)) { 1034 DRM_ERROR("Failed to allocate a surface id.\n"); 1035 goto out_no_id; 1036 } 1037 1038 if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { 1039 ret = -EBUSY; 1040 goto out_no_fifo; 1041 } 1042 1043 cmd_len = sizeof(cmd->body); 1044 submit_len = sizeof(*cmd); 1045 cmd = vmw_fifo_reserve(dev_priv, submit_len); 1046 if (unlikely(cmd == NULL)) { 1047 DRM_ERROR("Failed reserving FIFO space for surface " 1048 "creation.\n"); 1049 ret = -ENOMEM; 1050 goto out_no_fifo; 1051 } 1052 1053 cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; 1054 cmd->header.size = cmd_len; 1055 cmd->body.sid = srf->res.id; 1056 cmd->body.surfaceFlags = srf->flags; 1057 cmd->body.format = cpu_to_le32(srf->format); 1058 cmd->body.numMipLevels = srf->mip_levels[0]; 1059 cmd->body.multisampleCount = srf->multisample_count; 1060 cmd->body.autogenFilter = srf->autogen_filter; 1061 cmd->body.size.width = srf->base_size.width; 1062 cmd->body.size.height = srf->base_size.height; 1063 cmd->body.size.depth = srf->base_size.depth; 1064 vmw_fifo_commit(dev_priv, submit_len); 1065 1066 return 0; 1067 1068 out_no_fifo: 1069 vmw_resource_release_id(res); 1070 out_no_id: 1071 vmw_3d_resource_dec(dev_priv, false); 1072 return ret; 1073 } 1074 1075 1076 static int vmw_gb_surface_bind(struct vmw_resource *res, 1077 struct ttm_validate_buffer *val_buf) 1078 { 1079 struct vmw_private *dev_priv = res->dev_priv; 1080 struct { 1081 SVGA3dCmdHeader header; 1082 SVGA3dCmdBindGBSurface body; 1083 } *cmd1; 1084 struct { 1085 SVGA3dCmdHeader header; 1086 SVGA3dCmdUpdateGBSurface body; 1087 } *cmd2; 1088 uint32_t submit_size; 1089 struct ttm_buffer_object *bo = val_buf->bo; 1090 1091 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 1092 1093 submit_size = sizeof(*cmd1) + (res->backup_dirty ? 
sizeof(*cmd2) : 0); 1094 1095 cmd1 = vmw_fifo_reserve(dev_priv, submit_size); 1096 if (unlikely(cmd1 == NULL)) { 1097 DRM_ERROR("Failed reserving FIFO space for surface " 1098 "binding.\n"); 1099 return -ENOMEM; 1100 } 1101 1102 cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 1103 cmd1->header.size = sizeof(cmd1->body); 1104 cmd1->body.sid = res->id; 1105 cmd1->body.mobid = bo->mem.start; 1106 if (res->backup_dirty) { 1107 cmd2 = (void *) &cmd1[1]; 1108 cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; 1109 cmd2->header.size = sizeof(cmd2->body); 1110 cmd2->body.sid = res->id; 1111 res->backup_dirty = false; 1112 } 1113 vmw_fifo_commit(dev_priv, submit_size); 1114 1115 return 0; 1116 } 1117 1118 static int vmw_gb_surface_unbind(struct vmw_resource *res, 1119 bool readback, 1120 struct ttm_validate_buffer *val_buf) 1121 { 1122 struct vmw_private *dev_priv = res->dev_priv; 1123 struct ttm_buffer_object *bo = val_buf->bo; 1124 struct vmw_fence_obj *fence; 1125 1126 struct { 1127 SVGA3dCmdHeader header; 1128 SVGA3dCmdReadbackGBSurface body; 1129 } *cmd1; 1130 struct { 1131 SVGA3dCmdHeader header; 1132 SVGA3dCmdInvalidateGBSurface body; 1133 } *cmd2; 1134 struct { 1135 SVGA3dCmdHeader header; 1136 SVGA3dCmdBindGBSurface body; 1137 } *cmd3; 1138 uint32_t submit_size; 1139 uint8_t *cmd; 1140 1141 1142 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 1143 1144 submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); 1145 cmd = vmw_fifo_reserve(dev_priv, submit_size); 1146 if (unlikely(cmd == NULL)) { 1147 DRM_ERROR("Failed reserving FIFO space for surface " 1148 "unbinding.\n"); 1149 return -ENOMEM; 1150 } 1151 1152 if (readback) { 1153 cmd1 = (void *) cmd; 1154 cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; 1155 cmd1->header.size = sizeof(cmd1->body); 1156 cmd1->body.sid = res->id; 1157 cmd3 = (void *) &cmd1[1]; 1158 } else { 1159 cmd2 = (void *) cmd; 1160 cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; 1161 cmd2->header.size = sizeof(cmd2->body); 1162 cmd2->body.sid = res->id; 1163 cmd3 = (void *) &cmd2[1]; 1164 } 1165 1166 cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 1167 cmd3->header.size = sizeof(cmd3->body); 1168 cmd3->body.sid = res->id; 1169 cmd3->body.mobid = SVGA3D_INVALID_ID; 1170 1171 vmw_fifo_commit(dev_priv, submit_size); 1172 1173 /* 1174 * Create a fence object and fence the backup buffer. 
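	 * This keeps the buffer from being reused until the readback or
	 * invalidate above has completed on the device.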
1175 */ 1176 1177 (void) vmw_execbuf_fence_commands(NULL, dev_priv, 1178 &fence, NULL); 1179 1180 vmw_fence_single_bo(val_buf->bo, fence); 1181 1182 if (likely(fence != NULL)) 1183 vmw_fence_obj_unreference(&fence); 1184 1185 return 0; 1186 } 1187 1188 static int vmw_gb_surface_destroy(struct vmw_resource *res) 1189 { 1190 struct vmw_private *dev_priv = res->dev_priv; 1191 struct { 1192 SVGA3dCmdHeader header; 1193 SVGA3dCmdDestroyGBSurface body; 1194 } *cmd; 1195 1196 if (likely(res->id == -1)) 1197 return 0; 1198 1199 mutex_lock(&dev_priv->binding_mutex); 1200 vmw_context_binding_res_list_scrub(&res->binding_head); 1201 1202 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1203 if (unlikely(cmd == NULL)) { 1204 DRM_ERROR("Failed reserving FIFO space for surface " 1205 "destruction.\n"); 1206 mutex_unlock(&dev_priv->binding_mutex); 1207 return -ENOMEM; 1208 } 1209 1210 cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; 1211 cmd->header.size = sizeof(cmd->body); 1212 cmd->body.sid = res->id; 1213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 1214 mutex_unlock(&dev_priv->binding_mutex); 1215 vmw_resource_release_id(res); 1216 vmw_3d_resource_dec(dev_priv, false); 1217 1218 return 0; 1219 } 1220 1221 /** 1222 * vmw_gb_surface_define_ioctl - Ioctl function implementing 1223 * the user surface define functionality. 1224 * 1225 * @dev: Pointer to a struct drm_device. 1226 * @data: Pointer to data copied from / to user-space. 1227 * @file_priv: Pointer to a drm file private structure. 1228 */ 1229 int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, 1230 struct drm_file *file_priv) 1231 { 1232 struct vmw_private *dev_priv = vmw_priv(dev); 1233 struct vmw_user_surface *user_srf; 1234 struct vmw_surface *srf; 1235 struct vmw_resource *res; 1236 struct vmw_resource *tmp; 1237 union drm_vmw_gb_surface_create_arg *arg = 1238 (union drm_vmw_gb_surface_create_arg *)data; 1239 struct drm_vmw_gb_surface_create_req *req = &arg->req; 1240 struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; 1241 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1242 int ret; 1243 uint32_t size; 1244 const struct svga3d_surface_desc *desc; 1245 uint32_t backup_handle; 1246 1247 if (unlikely(vmw_user_surface_size == 0)) 1248 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1249 128; 1250 1251 size = vmw_user_surface_size + 128; 1252 1253 desc = svga3dsurface_get_desc(req->format); 1254 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { 1255 DRM_ERROR("Invalid surface format for surface creation.\n"); 1256 return -EINVAL; 1257 } 1258 1259 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 1260 if (unlikely(ret != 0)) 1261 return ret; 1262 1263 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 1264 size, false, true); 1265 if (unlikely(ret != 0)) { 1266 if (ret != -ERESTARTSYS) 1267 DRM_ERROR("Out of graphics memory for surface" 1268 " creation.\n"); 1269 goto out_unlock; 1270 } 1271 1272 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); 1273 if (unlikely(user_srf == NULL)) { 1274 ret = -ENOMEM; 1275 goto out_no_user_srf; 1276 } 1277 1278 srf = &user_srf->srf; 1279 res = &srf->res; 1280 1281 srf->flags = req->svga3d_flags; 1282 srf->format = req->format; 1283 srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; 1284 srf->mip_levels[0] = req->mip_levels; 1285 srf->num_sizes = 1; 1286 srf->sizes = NULL; 1287 srf->offsets = NULL; 1288 user_srf->size = size; 1289 srf->base_size = req->base_size; 1290 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; 1291 
srf->multisample_count = req->multisample_count; 1292 res->backup_size = svga3dsurface_get_serialized_size 1293 (srf->format, srf->base_size, srf->mip_levels[0], 1294 srf->flags & SVGA3D_SURFACE_CUBEMAP); 1295 1296 user_srf->prime.base.shareable = false; 1297 user_srf->prime.base.tfile = NULL; 1298 if (drm_is_primary_client(file_priv)) 1299 user_srf->master = drm_master_get(file_priv->master); 1300 1301 /** 1302 * From this point, the generic resource management functions 1303 * destroy the object on failure. 1304 */ 1305 1306 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); 1307 if (unlikely(ret != 0)) 1308 goto out_unlock; 1309 1310 if (req->buffer_handle != SVGA3D_INVALID_ID) { 1311 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1312 &res->backup); 1313 } else if (req->drm_surface_flags & 1314 drm_vmw_surface_flag_create_buffer) 1315 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1316 res->backup_size, 1317 req->drm_surface_flags & 1318 drm_vmw_surface_flag_shareable, 1319 &backup_handle, 1320 &res->backup); 1321 1322 if (unlikely(ret != 0)) { 1323 vmw_resource_unreference(&res); 1324 goto out_unlock; 1325 } 1326 1327 tmp = vmw_resource_reference(&srf->res); 1328 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 1329 req->drm_surface_flags & 1330 drm_vmw_surface_flag_shareable, 1331 VMW_RES_SURFACE, 1332 &vmw_user_surface_base_release, NULL); 1333 1334 if (unlikely(ret != 0)) { 1335 vmw_resource_unreference(&tmp); 1336 vmw_resource_unreference(&res); 1337 goto out_unlock; 1338 } 1339 1340 rep->handle = user_srf->prime.base.hash.key; 1341 rep->backup_size = res->backup_size; 1342 if (res->backup) { 1343 rep->buffer_map_handle = 1344 drm_vma_node_offset_addr(&res->backup->base.vma_node); 1345 rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; 1346 rep->buffer_handle = backup_handle; 1347 } else { 1348 rep->buffer_map_handle = 0; 1349 rep->buffer_size = 0; 1350 rep->buffer_handle = SVGA3D_INVALID_ID; 1351 } 1352 1353 vmw_resource_unreference(&res); 1354 1355 ttm_read_unlock(&dev_priv->reservation_sem); 1356 return 0; 1357 out_no_user_srf: 1358 ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 1359 out_unlock: 1360 ttm_read_unlock(&dev_priv->reservation_sem); 1361 return ret; 1362 } 1363 1364 /** 1365 * vmw_gb_surface_reference_ioctl - Ioctl function implementing 1366 * the user surface reference functionality. 1367 * 1368 * @dev: Pointer to a struct drm_device. 1369 * @data: Pointer to data copied from / to user-space. 1370 * @file_priv: Pointer to a drm file private structure. 
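 *
 * Returns the original creation parameters in the creq part of the reply
 * and a handle, size and map offset for the backup buffer in the crep part.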
1371 */ 1372 int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, 1373 struct drm_file *file_priv) 1374 { 1375 struct vmw_private *dev_priv = vmw_priv(dev); 1376 union drm_vmw_gb_surface_reference_arg *arg = 1377 (union drm_vmw_gb_surface_reference_arg *)data; 1378 struct drm_vmw_surface_arg *req = &arg->req; 1379 struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; 1380 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1381 struct vmw_surface *srf; 1382 struct vmw_user_surface *user_srf; 1383 struct ttm_base_object *base; 1384 uint32_t backup_handle; 1385 int ret = -EINVAL; 1386 1387 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, 1388 req->handle_type, &base); 1389 if (unlikely(ret != 0)) 1390 return ret; 1391 1392 user_srf = container_of(base, struct vmw_user_surface, prime.base); 1393 srf = &user_srf->srf; 1394 if (srf->res.backup == NULL) { 1395 DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); 1396 goto out_bad_resource; 1397 } 1398 1399 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ 1400 ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, 1401 &backup_handle); 1402 mutex_unlock(&dev_priv->cmdbuf_mutex); 1403 1404 if (unlikely(ret != 0)) { 1405 DRM_ERROR("Could not add a reference to a GB surface " 1406 "backup buffer.\n"); 1407 (void) ttm_ref_object_base_unref(tfile, base->hash.key, 1408 TTM_REF_USAGE); 1409 goto out_bad_resource; 1410 } 1411 1412 rep->creq.svga3d_flags = srf->flags; 1413 rep->creq.format = srf->format; 1414 rep->creq.mip_levels = srf->mip_levels[0]; 1415 rep->creq.drm_surface_flags = 0; 1416 rep->creq.multisample_count = srf->multisample_count; 1417 rep->creq.autogen_filter = srf->autogen_filter; 1418 rep->creq.buffer_handle = backup_handle; 1419 rep->creq.base_size = srf->base_size; 1420 rep->crep.handle = user_srf->prime.base.hash.key; 1421 rep->crep.backup_size = srf->res.backup_size; 1422 rep->crep.buffer_handle = backup_handle; 1423 rep->crep.buffer_map_handle = 1424 drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); 1425 rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; 1426 1427 out_bad_resource: 1428 ttm_base_object_unref(&base); 1429 1430 return ret; 1431 } 1432