// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where the command
 * is invalid unless dx_ctx_node is set.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})

#define VMW_DECLARE_CMD_VAR(__var, __type)				\
	struct {							\
		SVGA3dCmdHeader header;					\
		__type body;						\
	} __var
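/*
 * For reference, a sketch of what the macro above expands to: e.g.
 * VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader) declares
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdDefineShader body;
 *	} *cmd;
 *
 * i.e. a pointer to an anonymous header + body pair that the verifiers
 * below overlay onto the command stream, typically via
 * cmd = container_of(header, typeof(*cmd), header).
 */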
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_bo *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
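/*
 * Illustrative sketch (mirroring how the verifiers below use this helper):
 * the byte offset of a resource id inside the command buffer is computed as
 *
 *	offset = vmw_ptr_diff(sw_context->buf_start, id_loc);
 *
 * and then handed to vmw_resource_relocation_add() so the id can be patched
 * once validation has assigned the real one.
 */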
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}
/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  = 0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;

	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}
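/*
 * A minimal usage sketch for the helper above (illustrative only): most
 * callers add a resource without context setup, e.g.
 *
 *	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
 *				      vmw_val_add_flag_noctx);
 *
 * Re-adding the most recently seen resource of a type hits the res_cache
 * fast path at the top of the function and returns 0 immediately.
 */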
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_bo *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob) {
			vmw_bo_placement_set(dx_query_mob,
					     VMW_BO_DOMAIN_MOB,
					     VMW_BO_DOMAIN_MOB);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob);
		}
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
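/*
 * Worked example (a sketch): for a vmw_res_rel_normal relocation recorded at
 * byte offset 8, the loop above effectively performs
 *
 *	*(u32 *)((unsigned long)cb + 8) = rel->res->id;
 *
 * whereas a vmw_res_rel_cond_nop relocation whose resource ended up with
 * id == -1 overwrites the command id with SVGA_3D_CMD_NOP, making the
 * device skip the command.
 */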
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_bo *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type-specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}
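/*
 * Most fixed-function verifiers below reduce to one or two calls of the
 * helper above. A minimal sketch, mirroring vmw_cmd_present_check():
 *
 *	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 *				 VMW_RES_DIRTY_NONE, user_surface_converter,
 *				 &cmd->body.sid, NULL);
 */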
/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_bo *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->tbo.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence must be the last resource
 * of that type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
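/*
 * Note the dirty-flag convention used by the copy/blit checkers in this
 * file: source surfaces are added with VMW_RES_DIRTY_NONE while destination
 * surfaces use VMW_RES_DIRTY_SET, since only the destination contents are
 * modified by the device.
 */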
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If
 * so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_bo *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
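/*
 * Ordering sketch for the two helpers above (illustrative): the execbuf
 * path calls vmw_query_bo_switch_prepare() while collecting buffers,
 * submits the command batch, calls vmw_query_bo_switch_commit(), and only
 * then fences both query buffers with a sequence emitted after the batch;
 * that ordering is what makes the asynchronous unpin of the old buffer safe.
 */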
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
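/*
 * A hedged sketch of how the saved relocations are consumed: after buffer
 * validation the driver walks sw_context->bo_relocations and patches each
 * reloc->mob_loc (or reloc->location) with the validated buffer's MOB id
 * (or guest address), which is the vmw_apply_relocations() step referred to
 * in the kdoc above.
 */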
/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later.
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
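/*
 * The pattern above (also used by vmw_cmd_begin_query() and
 * vmw_cmd_wait_query()) rewrites a legacy query command in place: a
 * same-sized guest-backed variant is built on the stack and memcpy'd over
 * the original, then the GB verifier is re-run on the rewritten header. The
 * BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)) guards the size assumption that
 * makes the in-place rewrite safe.
 */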
/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->tbo.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);

	return 0;
}
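/*
 * Bounds sketch for the DMA check above: with a buffer object of bo_size
 * bytes and a guest pointer at offset off, the verifier rejects
 * off > bo_size and then clamps suffix->maximumOffset to bo_size - off, so
 * the device can never be asked to transfer past the end of the buffer
 * object.
 */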
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_bo *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_bo *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
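
/*
 * Editorial note (summary of the call sites above and below, not new
 * logic): the whole-surface commands UPDATE/READBACK/INVALIDATE_GB_SURFACE
 * pass VMW_RES_DIRTY_CLEAR, while their per-image counterparts pass
 * VMW_RES_DIRTY_NONE; presumably only the full-surface forms justify
 * resetting the resource's dirty-tracking state wholesale.
 */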

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
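
/*
 * Illustrative sketch of the NOP relocation used above (exposition only):
 * once the shader body following the command has been copied into the
 * per-context compat shader manager, the original define command must not
 * reach the device, so a vmw_res_rel_nop relocation is recorded at the
 * byte offset of cmd->header.id within the command buffer, i.e.
 *
 *	vmw_ptr_diff(sw_context->buf_start, &cmd->header.id)
 *
 * At fixup time the command id at that offset is overwritten so the device
 * skips the command; the rest of the stream is submitted unchanged. The
 * destroy path below uses the same mechanism.
 */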

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - per-device guest-backed
		 * shaders, but user-space thinks they are per-context
		 * host-backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_NONE,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}
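
/*
 * Illustrative example (hypothetical values, exposition only): for a pixel
 * shader constant buffer bound at slot 2 with a 256-byte window into the
 * surface, the tracked binding above would be filled in as
 *
 *	binding.shader_slot = SVGA3D_SHADERTYPE_PS - SVGA3D_SHADERTYPE_MIN;
 *	binding.offset      = 0;
 *	binding.size        = 256;
 *	binding.slot        = 2;
 *
 * so the staged binding state can later replay exactly the same range
 * when the context's bindings are re-emitted.
 */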

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);

	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
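
/*
 * Editorial note on the range check above: startView and num_sr_view are
 * both 32-bit, so their sum is widened to u64 before being compared
 * against SVGA3D_DX_MAX_SRVIEWS. A malicious startView close to U32_MAX
 * would otherwise wrap and slip past a 32-bit comparison; the same idiom
 * guards the vertex buffer count below.
 */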

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
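
/*
 * Illustrative layout (restating the local struct above, exposition only):
 * a SET_VERTEX_BUFFERS command binding two buffers starting at slot 0 is
 *
 *	SVGA3dCmdHeader             header;  // size = sizeof(body) +
 *	                                     //        2 * sizeof(SVGA3dVertexBuffer)
 *	SVGA3dCmdDXSetVertexBuffers body;    // startBuffer = 0
 *	SVGA3dVertexBuffer          buf[2];  // sid, stride and offset each
 *
 * which is why num is recovered by dividing the trailing bytes by
 * sizeof(SVGA3dVertexBuffer) before the per-buffer surface checks run.
 */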

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
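
/*
 * Editorial note on the shared-prefix trick above (exposition only): every
 * DX view define command body starts with the pair (defined_id, sid), so a
 * single local struct can decode the prefix of, e.g., both the
 * shader-resource and render-target view define commands without switching
 * on the actual command id; only vmw_view_cmd_to_type() needs to know
 * which view cotable the defined id lands in. The same idea is reused for
 * the state-object defines further down.
 */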

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR(res))
		return PTR_ERR(res);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
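
/*
 * Illustrative sketch (exposition only): unlike the unconditional NOP used
 * for the compat shader define/destroy commands earlier, the relocation
 * recorded above is vmw_res_rel_cond_nop, keyed on the view's resource id
 * at fixup time. If the view still resolves to a valid id the destroy
 * command is sent through unmodified; if it has already gone away, the
 * command id is patched to a NOP so the device never sees a destroy for an
 * unknown view.
 */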

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is...
	 * So mark the last looked-up surface, which is the surface
	 * the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);
	return 0;
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}

static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return ret;
}
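
/*
 * Editorial note (exposition only): the UAV count is bounded by
 * vmw_max_num_uavs(dev_priv) rather than a compile-time constant because
 * the device exposes the number of supported UAVs at runtime. The splice
 * index recorded via vmw_binding_add_uav_index() is taken verbatim from
 * the command: uavSpliceIndex here, startIndex in the compute-shader
 * variant that follows.
 */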

static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return ret;
}

static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}

static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When the device does not support SM5, the streamoutput-with-MOB
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5-capable device, a failed lookup means user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}

static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When the device does not support SM5, the streamoutput-with-MOB
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * On an SM5-capable device, a failed lookup means user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0;	/* Only one SO set to context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return ret;
}

static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
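
/*
 * Illustrative layout (exposition only): unlike the 3D commands, the
 * legacy 2D FIFO commands checked above have no SVGA3dCmdHeader. They are
 * a bare 32-bit command id immediately followed by the body, e.g.
 *
 *	uint32_t               cmd_id;	// SVGA_CMD_DEFINE_GMRFB
 *	SVGAFifoCmdDefineGMRFB body;
 *
 * which is why *size is computed as sizeof(uint32_t) + sizeof(body) and
 * then validated against the bytes actually remaining in the stream.
 */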

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW, 3483 &vmw_cmd_dx_clear_rendertarget_view, true, false, true), 3484 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW, 3485 &vmw_cmd_dx_clear_depthstencil_view, true, false, true), 3486 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid, 3487 true, false, true), 3488 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips, 3489 true, false, true), 3490 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, 3491 &vmw_cmd_dx_check_subresource, true, false, true), 3492 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, 3493 &vmw_cmd_dx_check_subresource, true, false, true), 3494 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE, 3495 &vmw_cmd_dx_check_subresource, true, false, true), 3496 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, 3497 &vmw_cmd_dx_view_define, true, false, true), 3498 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, 3499 &vmw_cmd_dx_view_remove, true, false, true), 3500 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW, 3501 &vmw_cmd_dx_view_define, true, false, true), 3502 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, 3503 &vmw_cmd_dx_view_remove, true, false, true), 3504 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW, 3505 &vmw_cmd_dx_view_define, true, false, true), 3506 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, 3507 &vmw_cmd_dx_view_remove, true, false, true), 3508 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT, 3509 &vmw_cmd_dx_so_define, true, false, true), 3510 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT, 3511 &vmw_cmd_dx_cid_check, true, false, true), 3512 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE, 3513 &vmw_cmd_dx_so_define, true, false, true), 3514 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE, 3515 &vmw_cmd_dx_cid_check, true, false, true), 3516 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE, 3517 &vmw_cmd_dx_so_define, true, false, true), 3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE, 3519 &vmw_cmd_dx_cid_check, true, false, true), 3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE, 3521 &vmw_cmd_dx_so_define, true, false, true), 3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE, 3523 &vmw_cmd_dx_cid_check, true, false, true), 3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE, 3525 &vmw_cmd_dx_so_define, true, false, true), 3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE, 3527 &vmw_cmd_dx_cid_check, true, false, true), 3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER, 3529 &vmw_cmd_dx_define_shader, true, false, true), 3530 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER, 3531 &vmw_cmd_dx_destroy_shader, true, false, true), 3532 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER, 3533 &vmw_cmd_dx_bind_shader, true, false, true), 3534 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT, 3535 &vmw_cmd_dx_so_define, true, false, true), 3536 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT, 3537 &vmw_cmd_dx_destroy_streamoutput, true, false, true), 3538 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, 3539 &vmw_cmd_dx_set_streamoutput, true, false, true), 3540 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS, 3541 &vmw_cmd_dx_set_so_targets, true, false, true), 3542 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT, 3543 &vmw_cmd_dx_cid_check, true, false, true), 3544 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY, 3545 &vmw_cmd_dx_cid_check, true, false, true), 3546 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY, 3547 &vmw_cmd_buffer_copy_check, true, false, true), 3548 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, 3549 &vmw_cmd_pred_copy_check, true, false, true), 3550 
VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, 3551 &vmw_cmd_dx_transfer_from_buffer, 3552 true, false, true), 3553 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET, 3554 &vmw_cmd_dx_set_constant_buffer_offset, 3555 true, false, true), 3556 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET, 3557 &vmw_cmd_dx_set_constant_buffer_offset, 3558 true, false, true), 3559 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET, 3560 &vmw_cmd_dx_set_constant_buffer_offset, 3561 true, false, true), 3562 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET, 3563 &vmw_cmd_dx_set_constant_buffer_offset, 3564 true, false, true), 3565 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET, 3566 &vmw_cmd_dx_set_constant_buffer_offset, 3567 true, false, true), 3568 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET, 3569 &vmw_cmd_dx_set_constant_buffer_offset, 3570 true, false, true), 3571 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy, 3572 true, false, true), 3573 3574 /* 3575 * SM5 commands 3576 */ 3577 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define, 3578 true, false, true), 3579 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove, 3580 true, false, true), 3581 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint, 3582 true, false, true), 3583 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT, 3584 &vmw_cmd_clear_uav_float, true, false, true), 3585 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true, 3586 false, true), 3587 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false, 3588 true), 3589 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT, 3590 &vmw_cmd_indexed_instanced_indirect, true, false, true), 3591 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT, 3592 &vmw_cmd_instanced_indirect, true, false, true), 3593 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true), 3594 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT, 3595 &vmw_cmd_dispatch_indirect, true, false, true), 3596 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true, 3597 false, true), 3598 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2, 3599 &vmw_cmd_sm5_view_define, true, false, true), 3600 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB, 3601 &vmw_cmd_dx_define_streamoutput, true, false, true), 3602 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT, 3603 &vmw_cmd_dx_bind_streamoutput, true, false, true), 3604 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2, 3605 &vmw_cmd_dx_so_define, true, false, true), 3606 }; 3607 3608 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) 3609 { 3610 u32 cmd_id = ((u32 *) buf)[0]; 3611 3612 if (cmd_id >= SVGA_CMD_MAX) { 3613 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; 3614 const struct vmw_cmd_entry *entry; 3615 3616 *size = header->size + sizeof(SVGA3dCmdHeader); 3617 cmd_id = header->id; 3618 if (cmd_id >= SVGA_3D_CMD_MAX) 3619 return false; 3620 3621 cmd_id -= SVGA_3D_CMD_BASE; 3622 entry = &vmw_cmd_entries[cmd_id]; 3623 *cmd = entry->cmd_name; 3624 return true; 3625 } 3626 3627 switch (cmd_id) { 3628 case SVGA_CMD_UPDATE: 3629 *cmd = "SVGA_CMD_UPDATE"; 3630 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate); 3631 break; 3632 case SVGA_CMD_DEFINE_GMRFB: 3633 *cmd = "SVGA_CMD_DEFINE_GMRFB"; 3634 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB); 3635 break; 3636 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: 3637 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN"; 3638 *size = sizeof(u32) + 
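
/*
 * The table above is indexed by (SVGA command id - SVGA_3D_CMD_BASE). A
 * minimal lookup sketch, mirroring what vmw_cmd_check() does further down
 * (and assuming the id has already been bounds-checked):
 *
 *	const struct vmw_cmd_entry *entry =
 *		&vmw_cmd_entries[header->id - SVGA_3D_CMD_BASE];
 *
 *	if (entry->func && (entry->user_allow || sw_context->kernel))
 *		ret = entry->func(dev_priv, sw_context, header);
 */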

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
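
/*
 * vmw_cmd_describe() is intended for debug dumping of a command stream. A
 * hypothetical walker, for illustration only (variable names and the
 * printing are assumptions, not an existing API):
 *
 *	const char *name;
 *	u32 size;
 *
 *	while (bytes_left > 0 && vmw_cmd_describe(buf, &size, &name)) {
 *		pr_debug("cmd %s, %u bytes\n", name, size);
 *		buf += size;
 *		bytes_left -= size;
 *	}
 */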

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->tbo;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
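
/*
 * The bounce buffer above grows by roughly 1.5x per iteration, page
 * aligned. Assuming the usual 32 KiB initial size and 4 KiB pages, the
 * growth sequence would be 32K -> 48K -> 72K -> 108K -> ..., so only a
 * logarithmic number of reallocations is needed to reach any request.
 */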

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * userspace handle is created in addition to the fence object.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the user-space
 * reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference
	 * the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}
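
/*
 * An illustrative (hypothetical) user-space counterpart to the convention
 * described above, pre-seeding the error field so a failed copy_to_user()
 * is detectable:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long)&rep;
 *	ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 *	if (rep.error != 0)
 *		... either fencing failed or the kernel could not
 *		    write the fence info back ...
 */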

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}
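
/*
 * The two submission helpers above are alternatives; vmw_execbuf_process()
 * below picks one depending on whether a command buffer header was
 * allocated:
 *
 *	if (!header)
 *		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
 *					      command_size, sw_context);
 *	else
 *		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header,
 *						command_size, sw_context);
 */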

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel copy of the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value of
 * @kernel_commands on function call. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
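
/*
 * Note the mixed return convention above: a real pointer, the original
 * @kernel_commands value (possibly NULL), or an ERR_PTR(). Callers are
 * expected to test with IS_ERR(), as vmw_execbuf_process() does below:
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		ret = PTR_ERR(kernel_commands);
 */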

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() will sync. The error will be
	 * propagated to user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}
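
/*
 * A hypothetical user-space consumer of the fence fd exported above (the
 * fd is a sync_file, so it becomes readable once the fence signals; names
 * here are illustrative only):
 *
 *	struct pollfd pfd = { .fd = rep.fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, timeout_ms) == 1)
 *		... the GPU work submitted above has completed ...
 */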

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions, where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	vmw_bo_placement_set(dev_priv->pinned_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
	if (ret)
		goto out_no_reserve;

	vmw_bo_placement_set(dev_priv->dummy_query_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions, where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM copies it correctly. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}
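
/*
 * A hypothetical minimal user-space submission, for illustration only
 * (error handling elided; field names follow struct drm_vmw_execbuf_arg,
 * and cmd_buf/cmd_size/ctx_handle/rep are assumed caller-provided):
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long)cmd_buf,
 *		.command_size = cmd_size,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = ctx_handle,
 *		.fence_rep = (unsigned long)&rep,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 */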