/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that vmw_resource_lookup() will find the resource.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
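
/*
 * Illustrative note (added commentary, not in the original source):
 * the helpers above are used in a fixed pattern by the context, surface
 * and stream code below. Each *_init() function registers the resource
 * with vmw_resource_init(), makes the device aware of it (an SVGA
 * "define" FIFO command for contexts and surfaces, an overlay claim for
 * streams) and only then calls vmw_resource_activate(). Until activation,
 * vmw_resource_lookup() will not return the resource, so user space
 * cannot reach a half-constructed object.
 */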

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
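
/*
 * Reference-flow sketch (added commentary, not in the original source)
 * for the pattern shared by vmw_context_define_ioctl(),
 * vmw_surface_define_ioctl() and vmw_stream_claim_ioctl():
 * the *_init() call disposes of the object itself on failure, so the
 * ioctl can simply return. On success, the extra reference taken with
 * vmw_resource_reference() is the one the base object's release function
 * later drops, while the unreference at the end of the ioctl drops the
 * caller's initial reference from kref_init().
 */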

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}
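
/*
 * Layout sketch (added commentary, not in the original source) of the
 * command built by vmw_surface_init() below. The fixed-size header and
 * SVGA3dCmdDefineSurface body are followed directly by one SVGA3dSize
 * entry per mip level across all faces:
 *
 *	| SVGA3dCmdHeader | SVGA3dCmdDefineSurface | SVGA3dSize * num_sizes |
 *
 * cmd_len (the size field written into the header) covers the body plus
 * the size array; submit_size additionally includes the header itself.
 */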

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
			cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
		kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
		(union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
		req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
		(union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
		rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
		(num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
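
/*
 * Worked example (added commentary, not in the original source; assumes
 * 4 KiB pages and 64-bit pointers): a 256 KiB buffer spans 64 pages, so
 * the page array needs 64 * sizeof(void *) = 512 bytes, which the
 * PAGE_MASK rounding above turns into a full 4096-byte page. The size
 * accounted against the TTM memory global is then bo_user_size + 4096.
 */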

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
		vmw_dmabuf_acc_size(bdev->glob,
				    (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
		(union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
		container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
		container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}