// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>
#include <linux/hashtable.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"


/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})

#define VMW_DECLARE_CMD_VAR(__var, __type)				\
	struct {							\
		SVGA3dCmdHeader header;					\
		__type body;						\
	} __var
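/*
 * For illustration only -- not part of the driver. A minimal sketch of how a
 * command verifier callback typically combines the two helpers above; the
 * function name "vmw_cmd_example" is hypothetical:
 *
 *	static int vmw_cmd_example(struct vmw_private *dev_priv,
 *				   struct vmw_sw_context *sw_context,
 *				   SVGA3dCmdHeader *header)
 *	{
 *		VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
 *		struct vmw_ctx_validation_info *ctx_node =
 *			VMW_GET_CTX_NODE(sw_context);
 *
 *		if (!ctx_node)
 *			return -EINVAL;
 *
 *		cmd = container_of(header, typeof(*cmd), header);
 *		...
 *		return 0;
 *	}
 */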
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
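/*
 * For illustration only -- not part of the driver. VMW_CMD_DEF() is meant to
 * populate a command table indexed by SVGA command id; a minimal sketch of
 * such a table (the real one appears later in this file) might look like:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check, true, false, false),
 *		...
 *	};
 */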
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}
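/*
 * For illustration only -- not part of the driver. vmw_ptr_diff() is what the
 * verifiers below use to record where in the command stream an id lives when
 * queuing a relocation:
 *
 *	u32 *id_loc;	// points at a resource id inside the command stream
 *	...
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 *
 * The recorded byte offset stays valid even if the stream is later copied to
 * a different buffer before the relocations are applied.
 */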
/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the data structures
 * associated with the binding manager.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  = 0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0,
						  dirty, (void **)&ctx_info,
						  NULL);
		if (ret)
			return ret;
	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res,
						  priv_size, dirty,
						  (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context,
						      res, ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view),
				      vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}
/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}
/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
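/*
 * For illustration only -- not part of the driver. A worked example of the
 * relocation flow above: if a verifier finds a surface id 24 bytes into the
 * command stream, it records
 *
 *	vmw_resource_relocation_add(sw_context, res, 24, vmw_res_rel_normal);
 *
 * and once the surface has been validated, vmw_resource_relocations_apply()
 * writes the device id at that byte offset:
 *
 *	*(u32 *)((unsigned long)cb + 24) = res->id;
 *
 * A vmw_res_rel_cond_nop relocation instead overwrites the command id with
 * SVGA_3D_CMD_NOP when validation left the resource without a device id
 * (res->id == -1).
 */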
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter,
			 &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty,
					      vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}
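/*
 * For illustration only -- not part of the driver. vmw_rebind_all_dx_query()
 * above shows the usual reserve/fill/commit pattern for emitting a
 * kernel-generated command; the skeleton is:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 *
 *	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), cid);
 *	if (cmd == NULL)
 *		return -ENOMEM;
 *	cmd->header.id = ...;		// the SVGA_3D_CMD_* id
 *	cmd->header.size = sizeof(cmd->body);
 *	...				// fill in cmd->body
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 */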
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}
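/*
 * For illustration only -- not part of the driver. A hypothetical DX
 * "set shader resources" verifier would hand the trailing array of view ids
 * straight to vmw_view_bindings_add(), along the lines of:
 *
 *	u32 num_sr_view = (header->size - sizeof(cmd->body)) /
 *			  sizeof(SVGA3dShaderResourceViewId);
 *
 *	return vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				     vmw_ctx_binding_sr,
 *				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
 *				     (void *) &cmd[1], num_sr_view,
 *				     cmd->body.startView);
 */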
/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache and hence the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If
 * so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
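/*
 * For illustration only -- not part of the driver. The pinned query buffer
 * dance spans a whole submission; schematically the execbuf path does:
 *
 *	vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	...			// validate, patch and submit the batch
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	...			// fence the batch; both the old and the new
 *				// query buffer are covered by that fence
 */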
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
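/*
 * For illustration only -- not part of the driver. The buffer-handle
 * translation below is deferred: at parse time a verifier only records where
 * the id lives, e.g.
 *
 *	struct vmw_buffer_object *vmw_bo;
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 *
 * and only after the buffer has been validated is the user-space handle in
 * the stream overwritten with the real MOB id, when the relocation lists are
 * applied.
 */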
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	ttm_bo_put(&vmw_bo->base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	ttm_bo_put(&vmw_bo->base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
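/*
 * For illustration only -- not part of the driver. The doc comments above
 * refer to vmw_apply_relocations(), which lives later in this file; roughly,
 * it walks sw_context->bo_relocations after validation and patches each
 * recorded location with the now-known placement, along the lines of:
 *
 *	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
 *		struct ttm_buffer_object *bo = &reloc->vbo->base;
 *
 *		switch (bo->resource->mem_type) {
 *		case VMW_PL_MOB:
 *			*reloc->mob_loc = bo->resource->start;
 *			break;
 *		case VMW_PL_GMR:
 *			reloc->location->gmrId = bo->resource->start;
 *			break;
 *		...
 *		}
 *	}
 */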
/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
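/*
 * For illustration only -- not part of the driver. A SURFACE_DMA command as
 * parsed by vmw_cmd_dma() below is laid out as
 *
 *	SVGA3dCmdHeader header;		// header.size covers everything below
 *	SVGA3dCmdSurfaceDMA body;
 *	SVGA3dCopyBox boxes[n];		// variable-length copy-box array
 *	SVGA3dCmdSurfaceDMASuffix suffix;
 *
 * which is why the suffix is located from the end of the command:
 *
 *	suffix = (SVGA3dCmdSurfaceDMASuffix *)
 *		((unsigned long) &cmd->body + header->size - sizeof(*suffix));
 */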
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
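/*
 * For illustration only -- not part of the driver. The DRAW_PRIMITIVES
 * command parsed above carries two back-to-back variable-length arrays after
 * the fixed body:
 *
 *	SVGA3dCmdHeader header;
 *	SVGA3dCmdDrawPrimitives body;	// numVertexDecls, numRanges
 *	SVGA3dVertexDecl decl[numVertexDecls];
 *	SVGA3dPrimitiveRange range[numRanges];
 *
 * so the verifier bounds-checks both counts against header->size before
 * walking the arrays and validating every surface id they reference.
 */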
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}
1754 /**
1755 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1756 *
1757 * @dev_priv: Pointer to a device private struct.
1758 * @sw_context: The software context being used for this batch.
1759 * @header: Pointer to the command header in the command stream.
1760 */
1761 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1762 				   struct vmw_sw_context *sw_context,
1763 				   SVGA3dCmdHeader *header)
1764 {
1765 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1766 		container_of(header, typeof(*cmd), header);
1767
1768 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1769 				     user_surface_converter, &cmd->body.sid,
1770 				     &cmd->body.mobid, 0);
1771 }
1772
1773 /**
1774 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1775 *
1776 * @dev_priv: Pointer to a device private struct.
1777 * @sw_context: The software context being used for this batch.
1778 * @header: Pointer to the command header in the command stream.
1779 */
1780 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1781 				   struct vmw_sw_context *sw_context,
1782 				   SVGA3dCmdHeader *header)
1783 {
1784 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1785 		container_of(header, typeof(*cmd), header);
1786
1787 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1788 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1789 				 &cmd->body.image.sid, NULL);
1790 }
1791
1792 /**
1793 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1794 *
1795 * @dev_priv: Pointer to a device private struct.
1796 * @sw_context: The software context being used for this batch.
1797 * @header: Pointer to the command header in the command stream.
1798 */
1799 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1800 				     struct vmw_sw_context *sw_context,
1801 				     SVGA3dCmdHeader *header)
1802 {
1803 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1804 		container_of(header, typeof(*cmd), header);
1805
1806 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1807 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1808 				 &cmd->body.sid, NULL);
1809 }
1810
1811 /**
1812 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1813 *
1814 * @dev_priv: Pointer to a device private struct.
1815 * @sw_context: The software context being used for this batch.
1816 * @header: Pointer to the command header in the command stream.
1817 */
1818 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1819 				     struct vmw_sw_context *sw_context,
1820 				     SVGA3dCmdHeader *header)
1821 {
1822 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1823 		container_of(header, typeof(*cmd), header);
1824
1825 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1826 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1827 				 &cmd->body.image.sid, NULL);
1828 }
1829
1830 /**
1831 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1832 * command
1833 *
1834 * @dev_priv: Pointer to a device private struct.
1835 * @sw_context: The software context being used for this batch.
1836 * @header: Pointer to the command header in the command stream.
1837 */ 1838 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, 1839 struct vmw_sw_context *sw_context, 1840 SVGA3dCmdHeader *header) 1841 { 1842 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) = 1843 container_of(header, typeof(*cmd), header); 1844 1845 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1846 VMW_RES_DIRTY_CLEAR, user_surface_converter, 1847 &cmd->body.sid, NULL); 1848 } 1849 1850 /** 1851 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1852 * command 1853 * 1854 * @dev_priv: Pointer to a device private struct. 1855 * @sw_context: The software context being used for this batch. 1856 * @header: Pointer to the command header in the command stream. 1857 */ 1858 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, 1859 struct vmw_sw_context *sw_context, 1860 SVGA3dCmdHeader *header) 1861 { 1862 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) = 1863 container_of(header, typeof(*cmd), header); 1864 1865 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1866 VMW_RES_DIRTY_NONE, user_surface_converter, 1867 &cmd->body.image.sid, NULL); 1868 } 1869 1870 /** 1871 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1872 * command 1873 * 1874 * @dev_priv: Pointer to a device private struct. 1875 * @sw_context: The software context being used for this batch. 1876 * @header: Pointer to the command header in the command stream. 1877 */ 1878 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, 1879 struct vmw_sw_context *sw_context, 1880 SVGA3dCmdHeader *header) 1881 { 1882 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) = 1883 container_of(header, typeof(*cmd), header); 1884 1885 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1886 VMW_RES_DIRTY_CLEAR, user_surface_converter, 1887 &cmd->body.sid, NULL); 1888 } 1889 1890 /** 1891 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command 1892 * 1893 * @dev_priv: Pointer to a device private struct. 1894 * @sw_context: The software context being used for this batch. 1895 * @header: Pointer to the command header in the command stream. 1896 */ 1897 static int vmw_cmd_shader_define(struct vmw_private *dev_priv, 1898 struct vmw_sw_context *sw_context, 1899 SVGA3dCmdHeader *header) 1900 { 1901 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader); 1902 int ret; 1903 size_t size; 1904 struct vmw_resource *ctx; 1905 1906 cmd = container_of(header, typeof(*cmd), header); 1907 1908 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1909 VMW_RES_DIRTY_SET, user_context_converter, 1910 &cmd->body.cid, &ctx); 1911 if (unlikely(ret != 0)) 1912 return ret; 1913 1914 if (unlikely(!dev_priv->has_mob)) 1915 return 0; 1916 1917 size = cmd->header.size - sizeof(cmd->body); 1918 ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx), 1919 cmd->body.shid, cmd + 1, cmd->body.type, 1920 size, &sw_context->staged_cmd_res); 1921 if (unlikely(ret != 0)) 1922 return ret; 1923 1924 return vmw_resource_relocation_add(sw_context, NULL, 1925 vmw_ptr_diff(sw_context->buf_start, 1926 &cmd->header.id), 1927 vmw_res_rel_nop); 1928 } 1929 1930 /** 1931 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command 1932 * 1933 * @dev_priv: Pointer to a device private struct. 1934 * @sw_context: The software context being used for this batch. 1935 * @header: Pointer to the command header in the command stream. 
1936 */ 1937 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, 1938 struct vmw_sw_context *sw_context, 1939 SVGA3dCmdHeader *header) 1940 { 1941 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader); 1942 int ret; 1943 struct vmw_resource *ctx; 1944 1945 cmd = container_of(header, typeof(*cmd), header); 1946 1947 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1948 VMW_RES_DIRTY_SET, user_context_converter, 1949 &cmd->body.cid, &ctx); 1950 if (unlikely(ret != 0)) 1951 return ret; 1952 1953 if (unlikely(!dev_priv->has_mob)) 1954 return 0; 1955 1956 ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid, 1957 cmd->body.type, &sw_context->staged_cmd_res); 1958 if (unlikely(ret != 0)) 1959 return ret; 1960 1961 return vmw_resource_relocation_add(sw_context, NULL, 1962 vmw_ptr_diff(sw_context->buf_start, 1963 &cmd->header.id), 1964 vmw_res_rel_nop); 1965 } 1966 1967 /** 1968 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command 1969 * 1970 * @dev_priv: Pointer to a device private struct. 1971 * @sw_context: The software context being used for this batch. 1972 * @header: Pointer to the command header in the command stream. 1973 */ 1974 static int vmw_cmd_set_shader(struct vmw_private *dev_priv, 1975 struct vmw_sw_context *sw_context, 1976 SVGA3dCmdHeader *header) 1977 { 1978 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader); 1979 struct vmw_ctx_bindinfo_shader binding; 1980 struct vmw_resource *ctx, *res = NULL; 1981 struct vmw_ctx_validation_info *ctx_info; 1982 int ret; 1983 1984 cmd = container_of(header, typeof(*cmd), header); 1985 1986 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { 1987 VMW_DEBUG_USER("Illegal shader type %u.\n", 1988 (unsigned int) cmd->body.type); 1989 return -EINVAL; 1990 } 1991 1992 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1993 VMW_RES_DIRTY_SET, user_context_converter, 1994 &cmd->body.cid, &ctx); 1995 if (unlikely(ret != 0)) 1996 return ret; 1997 1998 if (!dev_priv->has_mob) 1999 return 0; 2000 2001 if (cmd->body.shid != SVGA3D_INVALID_ID) { 2002 /* 2003 * This is the compat shader path - Per device guest-backed 2004 * shaders, but user-space thinks it's per context host- 2005 * backed shaders. 2006 */ 2007 res = vmw_shader_lookup(vmw_context_res_man(ctx), 2008 cmd->body.shid, cmd->body.type); 2009 if (!IS_ERR(res)) { 2010 ret = vmw_execbuf_res_val_add(sw_context, res, 2011 VMW_RES_DIRTY_NONE, 2012 vmw_val_add_flag_noctx); 2013 if (unlikely(ret != 0)) 2014 return ret; 2015 2016 ret = vmw_resource_relocation_add 2017 (sw_context, res, 2018 vmw_ptr_diff(sw_context->buf_start, 2019 &cmd->body.shid), 2020 vmw_res_rel_normal); 2021 if (unlikely(ret != 0)) 2022 return ret; 2023 } 2024 } 2025 2026 if (IS_ERR_OR_NULL(res)) { 2027 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, 2028 VMW_RES_DIRTY_NONE, 2029 user_shader_converter, &cmd->body.shid, 2030 &res); 2031 if (unlikely(ret != 0)) 2032 return ret; 2033 } 2034 2035 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx); 2036 if (!ctx_info) 2037 return -EINVAL; 2038 2039 binding.bi.ctx = ctx; 2040 binding.bi.res = res; 2041 binding.bi.bt = vmw_ctx_binding_shader; 2042 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2043 vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0); 2044 2045 return 0; 2046 } 2047 2048 /** 2049 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command 2050 * 2051 * @dev_priv: Pointer to a device private struct. 
2052 * @sw_context: The software context being used for this batch.
2053 * @header: Pointer to the command header in the command stream.
2054 */
2055 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2056 				    struct vmw_sw_context *sw_context,
2057 				    SVGA3dCmdHeader *header)
2058 {
2059 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2060 	int ret;
2061
2062 	cmd = container_of(header, typeof(*cmd), header);
2063
2064 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2065 				VMW_RES_DIRTY_SET, user_context_converter,
2066 				&cmd->body.cid, NULL);
2067 	if (unlikely(ret != 0))
2068 		return ret;
2069
2070 	if (dev_priv->has_mob)
2071 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2072
2073 	return 0;
2074 }
2075
2076 /**
2077 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2078 *
2079 * @dev_priv: Pointer to a device private struct.
2080 * @sw_context: The software context being used for this batch.
2081 * @header: Pointer to the command header in the command stream.
2082 */
2083 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2084 				  struct vmw_sw_context *sw_context,
2085 				  SVGA3dCmdHeader *header)
2086 {
2087 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2088 		container_of(header, typeof(*cmd), header);
2089
2090 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2091 				     user_shader_converter, &cmd->body.shid,
2092 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2093 }
2094
2095 /**
2096 * vmw_cmd_dx_set_single_constant_buffer - Validate
2097 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2098 *
2099 * @dev_priv: Pointer to a device private struct.
2100 * @sw_context: The software context being used for this batch.
2101 * @header: Pointer to the command header in the command stream.
2102 */
2103 static int
2104 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2105 				      struct vmw_sw_context *sw_context,
2106 				      SVGA3dCmdHeader *header)
2107 {
2108 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2109 	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2110 		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2111
2112 	struct vmw_resource *res = NULL;
2113 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2114 	struct vmw_ctx_bindinfo_cb binding;
2115 	int ret;
2116
2117 	if (!ctx_node)
2118 		return -EINVAL;
2119
2120 	cmd = container_of(header, typeof(*cmd), header);
2121 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2122 				VMW_RES_DIRTY_NONE, user_surface_converter,
2123 				&cmd->body.sid, &res);
2124 	if (unlikely(ret != 0))
2125 		return ret;
2126
2127 	binding.bi.ctx = ctx_node->ctx;
2128 	binding.bi.res = res;
2129 	binding.bi.bt = vmw_ctx_binding_cb;
2130 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2131 	binding.offset = cmd->body.offsetInBytes;
2132 	binding.size = cmd->body.sizeInBytes;
2133 	binding.slot = cmd->body.slot;
2134
2135 	if (binding.shader_slot >= max_shader_num ||
2136 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2137 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2138 			       (unsigned int) cmd->body.type,
2139 			       (unsigned int) binding.slot);
2140 		return -EINVAL;
2141 	}
2142
2143 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2144 			binding.slot);
2145
2146 	return 0;
2147 }
2148
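/*
 * A worked sketch of the shader-slot arithmetic used above: shader types
 * are enumerated from SVGA3D_SHADERTYPE_MIN, so per-type bindings are
 * tracked in zero-based slots (assuming the usual SVGA3D_SHADERTYPE_*
 * layout where VS == SVGA3D_SHADERTYPE_MIN, VS maps to slot 0 and PS to
 * slot 1). SM5 devices expose more shader types than DX10-level ones,
 * hence the two different upper bounds. The helper name is hypothetical;
 * the validators open-code this subtraction.
 */
static inline u32 vmw_shader_type_to_slot(SVGA3dShaderType type)
{
	return (u32)type - SVGA3D_SHADERTYPE_MIN;
}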
2149 /**
2150 * vmw_cmd_dx_set_constant_buffer_offset - Validate
2151 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2152 *
2153 * @dev_priv: Pointer to a device private struct.
2154 * @sw_context: The software context being used for this batch.
2155 * @header: Pointer to the command header in the command stream.
2156 */
2157 static int
2158 vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2159 				      struct vmw_sw_context *sw_context,
2160 				      SVGA3dCmdHeader *header)
2161 {
2162 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2163
2164 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2165 	u32 shader_slot;
2166
2167 	if (!has_sm5_context(dev_priv))
2168 		return -EINVAL;
2169
2170 	if (!ctx_node)
2171 		return -EINVAL;
2172
2173 	cmd = container_of(header, typeof(*cmd), header);
2174 	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2175 		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2176 			       (unsigned int) cmd->body.slot);
2177 		return -EINVAL;
2178 	}
2179
2180 	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2181 	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2182 				     cmd->body.slot, cmd->body.offsetInBytes);
2183
2184 	return 0;
2185 }
2186
2187 /**
2188 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2189 * command
2190 *
2191 * @dev_priv: Pointer to a device private struct.
2192 * @sw_context: The software context being used for this batch.
2193 * @header: Pointer to the command header in the command stream.
2194 */
2195 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2196 				     struct vmw_sw_context *sw_context,
2197 				     SVGA3dCmdHeader *header)
2198 {
2199 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2200 		container_of(header, typeof(*cmd), header);
2201 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2202 		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2203
2204 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2205 		sizeof(SVGA3dShaderResourceViewId);
2206
2207 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2208 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2209 	    cmd->body.type >= max_allowed) {
2210 		VMW_DEBUG_USER("Invalid shader binding.\n");
2211 		return -EINVAL;
2212 	}
2213
2214 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2215 				     vmw_ctx_binding_sr,
2216 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2217 				     (void *) &cmd[1], num_sr_view,
2218 				     cmd->body.startView);
2219 }
2220
2221 /**
2222 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2223 *
2224 * @dev_priv: Pointer to a device private struct.
2225 * @sw_context: The software context being used for this batch.
2226 * @header: Pointer to the command header in the command stream.
2227 */
2228 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2229 				 struct vmw_sw_context *sw_context,
2230 				 SVGA3dCmdHeader *header)
2231 {
2232 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2233 	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2234 SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; 2235 struct vmw_resource *res = NULL; 2236 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2237 struct vmw_ctx_bindinfo_shader binding; 2238 int ret = 0; 2239 2240 if (!ctx_node) 2241 return -EINVAL; 2242 2243 cmd = container_of(header, typeof(*cmd), header); 2244 2245 if (cmd->body.type >= max_allowed || 2246 cmd->body.type < SVGA3D_SHADERTYPE_MIN) { 2247 VMW_DEBUG_USER("Illegal shader type %u.\n", 2248 (unsigned int) cmd->body.type); 2249 return -EINVAL; 2250 } 2251 2252 if (cmd->body.shaderId != SVGA3D_INVALID_ID) { 2253 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0); 2254 if (IS_ERR(res)) { 2255 VMW_DEBUG_USER("Could not find shader for binding.\n"); 2256 return PTR_ERR(res); 2257 } 2258 2259 ret = vmw_execbuf_res_val_add(sw_context, res, 2260 VMW_RES_DIRTY_NONE, 2261 vmw_val_add_flag_noctx); 2262 if (ret) 2263 return ret; 2264 } 2265 2266 binding.bi.ctx = ctx_node->ctx; 2267 binding.bi.res = res; 2268 binding.bi.bt = vmw_ctx_binding_dx_shader; 2269 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 2270 2271 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0); 2272 2273 return 0; 2274 } 2275 2276 /** 2277 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS 2278 * command 2279 * 2280 * @dev_priv: Pointer to a device private struct. 2281 * @sw_context: The software context being used for this batch. 2282 * @header: Pointer to the command header in the command stream. 2283 */ 2284 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, 2285 struct vmw_sw_context *sw_context, 2286 SVGA3dCmdHeader *header) 2287 { 2288 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2289 struct vmw_ctx_bindinfo_vb binding; 2290 struct vmw_resource *res; 2291 struct { 2292 SVGA3dCmdHeader header; 2293 SVGA3dCmdDXSetVertexBuffers body; 2294 SVGA3dVertexBuffer buf[]; 2295 } *cmd; 2296 int i, ret, num; 2297 2298 if (!ctx_node) 2299 return -EINVAL; 2300 2301 cmd = container_of(header, typeof(*cmd), header); 2302 num = (cmd->header.size - sizeof(cmd->body)) / 2303 sizeof(SVGA3dVertexBuffer); 2304 if ((u64)num + (u64)cmd->body.startBuffer > 2305 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { 2306 VMW_DEBUG_USER("Invalid number of vertex buffers.\n"); 2307 return -EINVAL; 2308 } 2309 2310 for (i = 0; i < num; i++) { 2311 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2312 VMW_RES_DIRTY_NONE, 2313 user_surface_converter, 2314 &cmd->buf[i].sid, &res); 2315 if (unlikely(ret != 0)) 2316 return ret; 2317 2318 binding.bi.ctx = ctx_node->ctx; 2319 binding.bi.bt = vmw_ctx_binding_vb; 2320 binding.bi.res = res; 2321 binding.offset = cmd->buf[i].offset; 2322 binding.stride = cmd->buf[i].stride; 2323 binding.slot = i + cmd->body.startBuffer; 2324 2325 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot); 2326 } 2327 2328 return 0; 2329 } 2330 2331 /** 2332 * vmw_cmd_dx_set_index_buffer - Validate 2333 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. 2334 * 2335 * @dev_priv: Pointer to a device private struct. 2336 * @sw_context: The software context being used for this batch. 2337 * @header: Pointer to the command header in the command stream. 
2338 */ 2339 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, 2340 struct vmw_sw_context *sw_context, 2341 SVGA3dCmdHeader *header) 2342 { 2343 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2344 struct vmw_ctx_bindinfo_ib binding; 2345 struct vmw_resource *res; 2346 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer); 2347 int ret; 2348 2349 if (!ctx_node) 2350 return -EINVAL; 2351 2352 cmd = container_of(header, typeof(*cmd), header); 2353 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2354 VMW_RES_DIRTY_NONE, user_surface_converter, 2355 &cmd->body.sid, &res); 2356 if (unlikely(ret != 0)) 2357 return ret; 2358 2359 binding.bi.ctx = ctx_node->ctx; 2360 binding.bi.res = res; 2361 binding.bi.bt = vmw_ctx_binding_ib; 2362 binding.offset = cmd->body.offset; 2363 binding.format = cmd->body.format; 2364 2365 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0); 2366 2367 return 0; 2368 } 2369 2370 /** 2371 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS 2372 * command 2373 * 2374 * @dev_priv: Pointer to a device private struct. 2375 * @sw_context: The software context being used for this batch. 2376 * @header: Pointer to the command header in the command stream. 2377 */ 2378 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, 2379 struct vmw_sw_context *sw_context, 2380 SVGA3dCmdHeader *header) 2381 { 2382 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) = 2383 container_of(header, typeof(*cmd), header); 2384 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / 2385 sizeof(SVGA3dRenderTargetViewId); 2386 int ret; 2387 2388 if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) { 2389 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n"); 2390 return -EINVAL; 2391 } 2392 2393 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds, 2394 0, &cmd->body.depthStencilViewId, 1, 0); 2395 if (ret) 2396 return ret; 2397 2398 return vmw_view_bindings_add(sw_context, vmw_view_rt, 2399 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1], 2400 num_rt_view, 0); 2401 } 2402 2403 /** 2404 * vmw_cmd_dx_clear_rendertarget_view - Validate 2405 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command 2406 * 2407 * @dev_priv: Pointer to a device private struct. 2408 * @sw_context: The software context being used for this batch. 2409 * @header: Pointer to the command header in the command stream. 2410 */ 2411 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, 2412 struct vmw_sw_context *sw_context, 2413 SVGA3dCmdHeader *header) 2414 { 2415 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) = 2416 container_of(header, typeof(*cmd), header); 2417 struct vmw_resource *ret; 2418 2419 ret = vmw_view_id_val_add(sw_context, vmw_view_rt, 2420 cmd->body.renderTargetViewId); 2421 2422 return PTR_ERR_OR_ZERO(ret); 2423 } 2424 2425 /** 2426 * vmw_cmd_dx_clear_depthstencil_view - Validate 2427 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command 2428 * 2429 * @dev_priv: Pointer to a device private struct. 2430 * @sw_context: The software context being used for this batch. 2431 * @header: Pointer to the command header in the command stream. 
2432 */ 2433 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, 2434 struct vmw_sw_context *sw_context, 2435 SVGA3dCmdHeader *header) 2436 { 2437 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) = 2438 container_of(header, typeof(*cmd), header); 2439 struct vmw_resource *ret; 2440 2441 ret = vmw_view_id_val_add(sw_context, vmw_view_ds, 2442 cmd->body.depthStencilViewId); 2443 2444 return PTR_ERR_OR_ZERO(ret); 2445 } 2446 2447 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, 2448 struct vmw_sw_context *sw_context, 2449 SVGA3dCmdHeader *header) 2450 { 2451 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2452 struct vmw_resource *srf; 2453 struct vmw_resource *res; 2454 enum vmw_view_type view_type; 2455 int ret; 2456 /* 2457 * This is based on the fact that all affected define commands have the 2458 * same initial command body layout. 2459 */ 2460 struct { 2461 SVGA3dCmdHeader header; 2462 uint32 defined_id; 2463 uint32 sid; 2464 } *cmd; 2465 2466 if (!ctx_node) 2467 return -EINVAL; 2468 2469 view_type = vmw_view_cmd_to_type(header->id); 2470 if (view_type == vmw_view_max) 2471 return -EINVAL; 2472 2473 cmd = container_of(header, typeof(*cmd), header); 2474 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { 2475 VMW_DEBUG_USER("Invalid surface id.\n"); 2476 return -EINVAL; 2477 } 2478 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2479 VMW_RES_DIRTY_NONE, user_surface_converter, 2480 &cmd->sid, &srf); 2481 if (unlikely(ret != 0)) 2482 return ret; 2483 2484 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); 2485 ret = vmw_cotable_notify(res, cmd->defined_id); 2486 if (unlikely(ret != 0)) 2487 return ret; 2488 2489 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type, 2490 cmd->defined_id, header, 2491 header->size + sizeof(*header), 2492 &sw_context->staged_cmd_res); 2493 } 2494 2495 /** 2496 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command. 2497 * 2498 * @dev_priv: Pointer to a device private struct. 2499 * @sw_context: The software context being used for this batch. 2500 * @header: Pointer to the command header in the command stream. 
2501 */ 2502 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, 2503 struct vmw_sw_context *sw_context, 2504 SVGA3dCmdHeader *header) 2505 { 2506 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2507 struct vmw_ctx_bindinfo_so_target binding; 2508 struct vmw_resource *res; 2509 struct { 2510 SVGA3dCmdHeader header; 2511 SVGA3dCmdDXSetSOTargets body; 2512 SVGA3dSoTarget targets[]; 2513 } *cmd; 2514 int i, ret, num; 2515 2516 if (!ctx_node) 2517 return -EINVAL; 2518 2519 cmd = container_of(header, typeof(*cmd), header); 2520 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget); 2521 2522 if (num > SVGA3D_DX_MAX_SOTARGETS) { 2523 VMW_DEBUG_USER("Invalid DX SO binding.\n"); 2524 return -EINVAL; 2525 } 2526 2527 for (i = 0; i < num; i++) { 2528 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2529 VMW_RES_DIRTY_SET, 2530 user_surface_converter, 2531 &cmd->targets[i].sid, &res); 2532 if (unlikely(ret != 0)) 2533 return ret; 2534 2535 binding.bi.ctx = ctx_node->ctx; 2536 binding.bi.res = res; 2537 binding.bi.bt = vmw_ctx_binding_so_target; 2538 binding.offset = cmd->targets[i].offset; 2539 binding.size = cmd->targets[i].sizeInBytes; 2540 binding.slot = i; 2541 2542 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot); 2543 } 2544 2545 return 0; 2546 } 2547 2548 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, 2549 struct vmw_sw_context *sw_context, 2550 SVGA3dCmdHeader *header) 2551 { 2552 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2553 struct vmw_resource *res; 2554 /* 2555 * This is based on the fact that all affected define commands have 2556 * the same initial command body layout. 2557 */ 2558 struct { 2559 SVGA3dCmdHeader header; 2560 uint32 defined_id; 2561 } *cmd; 2562 enum vmw_so_type so_type; 2563 int ret; 2564 2565 if (!ctx_node) 2566 return -EINVAL; 2567 2568 so_type = vmw_so_cmd_to_type(header->id); 2569 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); 2570 if (IS_ERR(res)) 2571 return PTR_ERR(res); 2572 cmd = container_of(header, typeof(*cmd), header); 2573 ret = vmw_cotable_notify(res, cmd->defined_id); 2574 2575 return ret; 2576 } 2577 2578 /** 2579 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE 2580 * command 2581 * 2582 * @dev_priv: Pointer to a device private struct. 2583 * @sw_context: The software context being used for this batch. 2584 * @header: Pointer to the command header in the command stream. 
2585 */ 2586 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, 2587 struct vmw_sw_context *sw_context, 2588 SVGA3dCmdHeader *header) 2589 { 2590 struct { 2591 SVGA3dCmdHeader header; 2592 union { 2593 SVGA3dCmdDXReadbackSubResource r_body; 2594 SVGA3dCmdDXInvalidateSubResource i_body; 2595 SVGA3dCmdDXUpdateSubResource u_body; 2596 SVGA3dSurfaceId sid; 2597 }; 2598 } *cmd; 2599 2600 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != 2601 offsetof(typeof(*cmd), sid)); 2602 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != 2603 offsetof(typeof(*cmd), sid)); 2604 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != 2605 offsetof(typeof(*cmd), sid)); 2606 2607 cmd = container_of(header, typeof(*cmd), header); 2608 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2609 VMW_RES_DIRTY_NONE, user_surface_converter, 2610 &cmd->sid, NULL); 2611 } 2612 2613 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, 2614 struct vmw_sw_context *sw_context, 2615 SVGA3dCmdHeader *header) 2616 { 2617 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2618 2619 if (!ctx_node) 2620 return -EINVAL; 2621 2622 return 0; 2623 } 2624 2625 /** 2626 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view 2627 * resource for removal. 2628 * 2629 * @dev_priv: Pointer to a device private struct. 2630 * @sw_context: The software context being used for this batch. 2631 * @header: Pointer to the command header in the command stream. 2632 * 2633 * Check that the view exists, and if it was not created using this command 2634 * batch, conditionally make this command a NOP. 2635 */ 2636 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, 2637 struct vmw_sw_context *sw_context, 2638 SVGA3dCmdHeader *header) 2639 { 2640 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2641 struct { 2642 SVGA3dCmdHeader header; 2643 union vmw_view_destroy body; 2644 } *cmd = container_of(header, typeof(*cmd), header); 2645 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id); 2646 struct vmw_resource *view; 2647 int ret; 2648 2649 if (!ctx_node) 2650 return -EINVAL; 2651 2652 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type, 2653 &sw_context->staged_cmd_res, &view); 2654 if (ret || !view) 2655 return ret; 2656 2657 /* 2658 * If the view wasn't created during this command batch, it might 2659 * have been removed due to a context swapout, so add a 2660 * relocation to conditionally make this command a NOP to avoid 2661 * device errors. 2662 */ 2663 return vmw_resource_relocation_add(sw_context, view, 2664 vmw_ptr_diff(sw_context->buf_start, 2665 &cmd->header.id), 2666 vmw_res_rel_cond_nop); 2667 } 2668 2669 /** 2670 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command 2671 * 2672 * @dev_priv: Pointer to a device private struct. 2673 * @sw_context: The software context being used for this batch. 2674 * @header: Pointer to the command header in the command stream. 
2675 */ 2676 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, 2677 struct vmw_sw_context *sw_context, 2678 SVGA3dCmdHeader *header) 2679 { 2680 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2681 struct vmw_resource *res; 2682 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) = 2683 container_of(header, typeof(*cmd), header); 2684 int ret; 2685 2686 if (!ctx_node) 2687 return -EINVAL; 2688 2689 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); 2690 ret = vmw_cotable_notify(res, cmd->body.shaderId); 2691 if (ret) 2692 return ret; 2693 2694 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx, 2695 cmd->body.shaderId, cmd->body.type, 2696 &sw_context->staged_cmd_res); 2697 } 2698 2699 /** 2700 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command 2701 * 2702 * @dev_priv: Pointer to a device private struct. 2703 * @sw_context: The software context being used for this batch. 2704 * @header: Pointer to the command header in the command stream. 2705 */ 2706 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, 2707 struct vmw_sw_context *sw_context, 2708 SVGA3dCmdHeader *header) 2709 { 2710 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); 2711 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) = 2712 container_of(header, typeof(*cmd), header); 2713 int ret; 2714 2715 if (!ctx_node) 2716 return -EINVAL; 2717 2718 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0, 2719 &sw_context->staged_cmd_res); 2720 2721 return ret; 2722 } 2723 2724 /** 2725 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command 2726 * 2727 * @dev_priv: Pointer to a device private struct. 2728 * @sw_context: The software context being used for this batch. 2729 * @header: Pointer to the command header in the command stream. 2730 */ 2731 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, 2732 struct vmw_sw_context *sw_context, 2733 SVGA3dCmdHeader *header) 2734 { 2735 struct vmw_resource *ctx; 2736 struct vmw_resource *res; 2737 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) = 2738 container_of(header, typeof(*cmd), header); 2739 int ret; 2740 2741 if (cmd->body.cid != SVGA3D_INVALID_ID) { 2742 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2743 VMW_RES_DIRTY_SET, 2744 user_context_converter, &cmd->body.cid, 2745 &ctx); 2746 if (ret) 2747 return ret; 2748 } else { 2749 struct vmw_ctx_validation_info *ctx_node = 2750 VMW_GET_CTX_NODE(sw_context); 2751 2752 if (!ctx_node) 2753 return -EINVAL; 2754 2755 ctx = ctx_node->ctx; 2756 } 2757 2758 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0); 2759 if (IS_ERR(res)) { 2760 VMW_DEBUG_USER("Could not find shader to bind.\n"); 2761 return PTR_ERR(res); 2762 } 2763 2764 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, 2765 vmw_val_add_flag_noctx); 2766 if (ret) { 2767 VMW_DEBUG_USER("Error creating resource validation node.\n"); 2768 return ret; 2769 } 2770 2771 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, 2772 &cmd->body.mobid, 2773 cmd->body.offsetInBytes); 2774 } 2775 2776 /** 2777 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command 2778 * 2779 * @dev_priv: Pointer to a device private struct. 2780 * @sw_context: The software context being used for this batch. 2781 * @header: Pointer to the command header in the command stream. 
2782 */ 2783 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, 2784 struct vmw_sw_context *sw_context, 2785 SVGA3dCmdHeader *header) 2786 { 2787 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) = 2788 container_of(header, typeof(*cmd), header); 2789 struct vmw_resource *view; 2790 struct vmw_res_cache_entry *rcache; 2791 2792 view = vmw_view_id_val_add(sw_context, vmw_view_sr, 2793 cmd->body.shaderResourceViewId); 2794 if (IS_ERR(view)) 2795 return PTR_ERR(view); 2796 2797 /* 2798 * Normally the shader-resource view is not gpu-dirtying, but for 2799 * this particular command it is... 2800 * So mark the last looked-up surface, which is the surface 2801 * the view points to, gpu-dirty. 2802 */ 2803 rcache = &sw_context->res_cache[vmw_res_surface]; 2804 vmw_validation_res_set_dirty(sw_context->ctx, rcache->private, 2805 VMW_RES_DIRTY_SET); 2806 return 0; 2807 } 2808 2809 /** 2810 * vmw_cmd_dx_transfer_from_buffer - Validate 2811 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command 2812 * 2813 * @dev_priv: Pointer to a device private struct. 2814 * @sw_context: The software context being used for this batch. 2815 * @header: Pointer to the command header in the command stream. 2816 */ 2817 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, 2818 struct vmw_sw_context *sw_context, 2819 SVGA3dCmdHeader *header) 2820 { 2821 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) = 2822 container_of(header, typeof(*cmd), header); 2823 int ret; 2824 2825 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2826 VMW_RES_DIRTY_NONE, user_surface_converter, 2827 &cmd->body.srcSid, NULL); 2828 if (ret != 0) 2829 return ret; 2830 2831 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2832 VMW_RES_DIRTY_SET, user_surface_converter, 2833 &cmd->body.destSid, NULL); 2834 } 2835 2836 /** 2837 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command 2838 * 2839 * @dev_priv: Pointer to a device private struct. 2840 * @sw_context: The software context being used for this batch. 2841 * @header: Pointer to the command header in the command stream. 
2842 */ 2843 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, 2844 struct vmw_sw_context *sw_context, 2845 SVGA3dCmdHeader *header) 2846 { 2847 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) = 2848 container_of(header, typeof(*cmd), header); 2849 2850 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)) 2851 return -EINVAL; 2852 2853 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2854 VMW_RES_DIRTY_SET, user_surface_converter, 2855 &cmd->body.surface.sid, NULL); 2856 } 2857 2858 static int vmw_cmd_sm5(struct vmw_private *dev_priv, 2859 struct vmw_sw_context *sw_context, 2860 SVGA3dCmdHeader *header) 2861 { 2862 if (!has_sm5_context(dev_priv)) 2863 return -EINVAL; 2864 2865 return 0; 2866 } 2867 2868 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv, 2869 struct vmw_sw_context *sw_context, 2870 SVGA3dCmdHeader *header) 2871 { 2872 if (!has_sm5_context(dev_priv)) 2873 return -EINVAL; 2874 2875 return vmw_cmd_dx_view_define(dev_priv, sw_context, header); 2876 } 2877 2878 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv, 2879 struct vmw_sw_context *sw_context, 2880 SVGA3dCmdHeader *header) 2881 { 2882 if (!has_sm5_context(dev_priv)) 2883 return -EINVAL; 2884 2885 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header); 2886 } 2887 2888 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv, 2889 struct vmw_sw_context *sw_context, 2890 SVGA3dCmdHeader *header) 2891 { 2892 struct { 2893 SVGA3dCmdHeader header; 2894 SVGA3dCmdDXClearUAViewUint body; 2895 } *cmd = container_of(header, typeof(*cmd), header); 2896 struct vmw_resource *ret; 2897 2898 if (!has_sm5_context(dev_priv)) 2899 return -EINVAL; 2900 2901 ret = vmw_view_id_val_add(sw_context, vmw_view_ua, 2902 cmd->body.uaViewId); 2903 2904 return PTR_ERR_OR_ZERO(ret); 2905 } 2906 2907 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv, 2908 struct vmw_sw_context *sw_context, 2909 SVGA3dCmdHeader *header) 2910 { 2911 struct { 2912 SVGA3dCmdHeader header; 2913 SVGA3dCmdDXClearUAViewFloat body; 2914 } *cmd = container_of(header, typeof(*cmd), header); 2915 struct vmw_resource *ret; 2916 2917 if (!has_sm5_context(dev_priv)) 2918 return -EINVAL; 2919 2920 ret = vmw_view_id_val_add(sw_context, vmw_view_ua, 2921 cmd->body.uaViewId); 2922 2923 return PTR_ERR_OR_ZERO(ret); 2924 } 2925 2926 static int vmw_cmd_set_uav(struct vmw_private *dev_priv, 2927 struct vmw_sw_context *sw_context, 2928 SVGA3dCmdHeader *header) 2929 { 2930 struct { 2931 SVGA3dCmdHeader header; 2932 SVGA3dCmdDXSetUAViews body; 2933 } *cmd = container_of(header, typeof(*cmd), header); 2934 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) / 2935 sizeof(SVGA3dUAViewId); 2936 int ret; 2937 2938 if (!has_sm5_context(dev_priv)) 2939 return -EINVAL; 2940 2941 if (num_uav > vmw_max_num_uavs(dev_priv)) { 2942 VMW_DEBUG_USER("Invalid UAV binding.\n"); 2943 return -EINVAL; 2944 } 2945 2946 ret = vmw_view_bindings_add(sw_context, vmw_view_ua, 2947 vmw_ctx_binding_uav, 0, (void *)&cmd[1], 2948 num_uav, 0); 2949 if (ret) 2950 return ret; 2951 2952 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0, 2953 cmd->body.uavSpliceIndex); 2954 2955 return ret; 2956 } 2957 2958 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv, 2959 struct vmw_sw_context *sw_context, 2960 SVGA3dCmdHeader *header) 2961 { 2962 struct { 2963 SVGA3dCmdHeader header; 2964 SVGA3dCmdDXSetCSUAViews body; 2965 } *cmd = container_of(header, typeof(*cmd), header); 2966 u32 num_uav = (cmd->header.size - 
		sizeof(cmd->body)) /
2967 		sizeof(SVGA3dUAViewId);
2968 	int ret;
2969
2970 	if (!has_sm5_context(dev_priv))
2971 		return -EINVAL;
2972
2973 	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2974 		VMW_DEBUG_USER("Invalid UAV binding.\n");
2975 		return -EINVAL;
2976 	}
2977
2978 	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2979 				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2980 				    num_uav, 0);
2981 	if (ret)
2982 		return ret;
2983
2984 	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2985 				  cmd->body.startIndex);
2986
2987 	return ret;
2988 }
2989
2990 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2991 					  struct vmw_sw_context *sw_context,
2992 					  SVGA3dCmdHeader *header)
2993 {
2994 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2995 	struct vmw_resource *res;
2996 	struct {
2997 		SVGA3dCmdHeader header;
2998 		SVGA3dCmdDXDefineStreamOutputWithMob body;
2999 	} *cmd = container_of(header, typeof(*cmd), header);
3000 	int ret;
3001
3002 	if (!has_sm5_context(dev_priv))
3003 		return -EINVAL;
3004
3005 	if (!ctx_node) {
3006 		DRM_ERROR("DX Context not set.\n");
3007 		return -EINVAL;
3008 	}
3009
3010 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3011 	ret = vmw_cotable_notify(res, cmd->body.soid);
3012 	if (ret)
3013 		return ret;
3014
3015 	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3016 				       cmd->body.soid,
3017 				       &sw_context->staged_cmd_res);
3018 }
3019
3020 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3021 					   struct vmw_sw_context *sw_context,
3022 					   SVGA3dCmdHeader *header)
3023 {
3024 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3025 	struct vmw_resource *res;
3026 	struct {
3027 		SVGA3dCmdHeader header;
3028 		SVGA3dCmdDXDestroyStreamOutput body;
3029 	} *cmd = container_of(header, typeof(*cmd), header);
3030
3031 	if (!ctx_node) {
3032 		DRM_ERROR("DX Context not set.\n");
3033 		return -EINVAL;
3034 	}
3035
3036 	/*
3037 	 * When the device does not support SM5, the streamoutput-with-MOB
3038 	 * commands are not available to user-space. Simply return in this case.
3039 	 */
3040 	if (!has_sm5_context(dev_priv))
3041 		return 0;
3042
3043 	/*
3044 	 * On an SM5-capable device, a failed lookup means user-space probably
3045 	 * used the old streamoutput define command. Return without an error.
3046 	 */
3047 	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3048 					 cmd->body.soid);
3049 	if (IS_ERR(res))
3050 		return 0;
3051
3052 	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3053 					  &sw_context->staged_cmd_res);
3054 }
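/*
 * A condensed sketch of the compatibility rules the streamoutput
 * validators above and below apply (hypothetical helper; the validators
 * open-code these checks): MOB-backed streamoutput objects exist only on
 * SM5 devices, and even there a failed lookup usually means user-space
 * used the old, non-MOB define command, so neither case is an error for
 * the destroy and set paths.
 */
static inline bool vmw_streamoutput_is_mob_backed(struct vmw_private *dev_priv,
						  struct vmw_resource *res)
{
	return has_sm5_context(dev_priv) && !IS_ERR(res);
}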
3046 */ 3047 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), 3048 cmd->body.soid); 3049 if (IS_ERR(res)) 3050 return 0; 3051 3052 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid, 3053 &sw_context->staged_cmd_res); 3054 } 3055 3056 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv, 3057 struct vmw_sw_context *sw_context, 3058 SVGA3dCmdHeader *header) 3059 { 3060 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 3061 struct vmw_resource *res; 3062 struct { 3063 SVGA3dCmdHeader header; 3064 SVGA3dCmdDXBindStreamOutput body; 3065 } *cmd = container_of(header, typeof(*cmd), header); 3066 int ret; 3067 3068 if (!has_sm5_context(dev_priv)) 3069 return -EINVAL; 3070 3071 if (!ctx_node) { 3072 DRM_ERROR("DX Context not set.\n"); 3073 return -EINVAL; 3074 } 3075 3076 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), 3077 cmd->body.soid); 3078 if (IS_ERR(res)) { 3079 DRM_ERROR("Could not find streamoutput to bind.\n"); 3080 return PTR_ERR(res); 3081 } 3082 3083 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes); 3084 3085 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, 3086 vmw_val_add_flag_noctx); 3087 if (ret) { 3088 DRM_ERROR("Error creating resource validation node.\n"); 3089 return ret; 3090 } 3091 3092 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, 3093 &cmd->body.mobid, 3094 cmd->body.offsetInBytes); 3095 } 3096 3097 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv, 3098 struct vmw_sw_context *sw_context, 3099 SVGA3dCmdHeader *header) 3100 { 3101 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; 3102 struct vmw_resource *res; 3103 struct vmw_ctx_bindinfo_so binding; 3104 struct { 3105 SVGA3dCmdHeader header; 3106 SVGA3dCmdDXSetStreamOutput body; 3107 } *cmd = container_of(header, typeof(*cmd), header); 3108 int ret; 3109 3110 if (!ctx_node) { 3111 DRM_ERROR("DX Context not set.\n"); 3112 return -EINVAL; 3113 } 3114 3115 if (cmd->body.soid == SVGA3D_INVALID_ID) 3116 return 0; 3117 3118 /* 3119 * When device does not support SM5 then streamoutput with mob command is 3120 * not available to user-space. Simply return in this case. 3121 */ 3122 if (!has_sm5_context(dev_priv)) 3123 return 0; 3124 3125 /* 3126 * With SM5 capable device if lookup fails then user-space probably used 3127 * old streamoutput define command. Return without an error. 3128 */ 3129 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), 3130 cmd->body.soid); 3131 if (IS_ERR(res)) { 3132 return 0; 3133 } 3134 3135 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, 3136 vmw_val_add_flag_noctx); 3137 if (ret) { 3138 DRM_ERROR("Error creating resource validation node.\n"); 3139 return ret; 3140 } 3141 3142 binding.bi.ctx = ctx_node->ctx; 3143 binding.bi.res = res; 3144 binding.bi.bt = vmw_ctx_binding_so; 3145 binding.slot = 0; /* Only one SO set to context at a time. 
3152
3153 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3154 					      struct vmw_sw_context *sw_context,
3155 					      SVGA3dCmdHeader *header)
3156 {
3157 	struct vmw_draw_indexed_instanced_indirect_cmd {
3158 		SVGA3dCmdHeader header;
3159 		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3160 	} *cmd = container_of(header, typeof(*cmd), header);
3161
3162 	if (!has_sm5_context(dev_priv))
3163 		return -EINVAL;
3164
3165 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3166 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3167 				 &cmd->body.argsBufferSid, NULL);
3168 }
3169
3170 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3171 				      struct vmw_sw_context *sw_context,
3172 				      SVGA3dCmdHeader *header)
3173 {
3174 	struct vmw_draw_instanced_indirect_cmd {
3175 		SVGA3dCmdHeader header;
3176 		SVGA3dCmdDXDrawInstancedIndirect body;
3177 	} *cmd = container_of(header, typeof(*cmd), header);
3178
3179 	if (!has_sm5_context(dev_priv))
3180 		return -EINVAL;
3181
3182 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3183 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3184 				 &cmd->body.argsBufferSid, NULL);
3185 }
3186
3187 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3188 				     struct vmw_sw_context *sw_context,
3189 				     SVGA3dCmdHeader *header)
3190 {
3191 	struct vmw_dispatch_indirect_cmd {
3192 		SVGA3dCmdHeader header;
3193 		SVGA3dCmdDXDispatchIndirect body;
3194 	} *cmd = container_of(header, typeof(*cmd), header);
3195
3196 	if (!has_sm5_context(dev_priv))
3197 		return -EINVAL;
3198
3199 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3200 				 VMW_RES_DIRTY_NONE, user_surface_converter,
3201 				 &cmd->body.argsBufferSid, NULL);
3202 }
3203
3204 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3205 				struct vmw_sw_context *sw_context,
3206 				void *buf, uint32_t *size)
3207 {
3208 	uint32_t size_remaining = *size;
3209 	uint32_t cmd_id;
3210
3211 	cmd_id = ((uint32_t *)buf)[0];
3212 	switch (cmd_id) {
3213 	case SVGA_CMD_UPDATE:
3214 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3215 		break;
3216 	case SVGA_CMD_DEFINE_GMRFB:
3217 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3218 		break;
3219 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3220 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3221 		break;
3222 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3223 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3224 		break;
3225 	default:
3226 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3227 		return -EINVAL;
3228 	}
3229
3230 	if (*size > size_remaining) {
3231 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3232 			       cmd_id);
3233 		return -EINVAL;
3234 	}
3235
3236 	if (unlikely(!sw_context->kernel)) {
3237 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3238 		return -EPERM;
3239 	}
3240
3241 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3242 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3243
3244 	return 0;
3245 }
3246
3247 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3248 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3249 		    false, false, false),
3250 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3251 		    false, false, false),
3252 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3253 		    true, false, false),
3254 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3255 		    true, false, false),
3256 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, 3257 true, false, false), 3258 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, 3259 false, false, false), 3260 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, 3261 false, false, false), 3262 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, 3263 true, false, false), 3264 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, 3265 true, false, false), 3266 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, 3267 true, false, false), 3268 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, 3269 &vmw_cmd_set_render_target_check, true, false, false), 3270 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, 3271 true, false, false), 3272 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, 3273 true, false, false), 3274 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, 3275 true, false, false), 3276 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, 3277 true, false, false), 3278 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, 3279 true, false, false), 3280 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, 3281 true, false, false), 3282 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, 3283 true, false, false), 3284 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, 3285 false, false, false), 3286 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, 3287 true, false, false), 3288 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, 3289 true, false, false), 3290 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, 3291 true, false, false), 3292 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, 3293 true, false, false), 3294 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, 3295 true, false, false), 3296 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, 3297 true, false, false), 3298 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, 3299 true, false, false), 3300 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, 3301 true, false, false), 3302 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, 3303 true, false, false), 3304 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, 3305 true, false, false), 3306 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 3307 &vmw_cmd_blt_surf_screen_check, false, false, false), 3308 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, 3309 false, false, false), 3310 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, 3311 false, false, false), 3312 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, 3313 false, false, false), 3314 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, 3315 false, false, false), 3316 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, 3317 false, false, false), 3318 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid, 3319 false, false, false), 3320 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid, 3321 false, false, false), 3322 VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false), 3323 VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false), 3324 VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false), 3325 VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false), 3326 VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false), 3327 VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false), 3328 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, 3329 false, false, true), 3330 
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, 3331 false, false, true), 3332 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, 3333 false, false, true), 3334 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, 3335 false, false, true), 3336 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid, 3337 false, false, true), 3338 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, 3339 false, false, true), 3340 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, 3341 false, false, true), 3342 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, 3343 false, false, true), 3344 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, 3345 true, false, true), 3346 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, 3347 false, false, true), 3348 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, 3349 true, false, true), 3350 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, 3351 &vmw_cmd_update_gb_surface, true, false, true), 3352 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, 3353 &vmw_cmd_readback_gb_image, true, false, true), 3354 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, 3355 &vmw_cmd_readback_gb_surface, true, false, true), 3356 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, 3357 &vmw_cmd_invalidate_gb_image, true, false, true), 3358 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, 3359 &vmw_cmd_invalidate_gb_surface, true, false, true), 3360 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, 3361 false, false, true), 3362 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, 3363 false, false, true), 3364 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, 3365 false, false, true), 3366 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, 3367 false, false, true), 3368 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, 3369 false, false, true), 3370 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, 3371 false, false, true), 3372 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, 3373 true, false, true), 3374 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, 3375 false, false, true), 3376 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, 3377 false, false, false), 3378 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, 3379 true, false, true), 3380 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, 3381 true, false, true), 3382 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, 3383 true, false, true), 3384 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, 3385 true, false, true), 3386 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok, 3387 true, false, true), 3388 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, 3389 false, false, true), 3390 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, 3391 false, false, true), 3392 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, 3393 false, false, true), 3394 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, 3395 false, false, true), 3396 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, 3397 false, false, true), 3398 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, 3399 false, false, true), 3400 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, 3401 false, false, true), 3402 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, 3403 false, false, true), 3404 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 3405 false, false, 
true), 3406 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 3407 false, false, true), 3408 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, 3409 true, false, true), 3410 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid, 3411 false, false, true), 3412 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid, 3413 false, false, true), 3414 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid, 3415 false, false, true), 3416 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, 3417 false, false, true), 3418 3419 /* SM commands */ 3420 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, 3421 false, false, true), 3422 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, 3423 false, false, true), 3424 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid, 3425 false, false, true), 3426 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid, 3427 false, false, true), 3428 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid, 3429 false, false, true), 3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER, 3431 &vmw_cmd_dx_set_single_constant_buffer, true, false, true), 3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES, 3433 &vmw_cmd_dx_set_shader_res, true, false, true), 3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader, 3435 true, false, true), 3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check, 3437 true, false, true), 3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check, 3439 true, false, true), 3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check, 3441 true, false, true), 3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check, 3443 true, false, true), 3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, 3445 &vmw_cmd_dx_cid_check, true, false, true), 3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check, 3447 true, false, true), 3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS, 3449 &vmw_cmd_dx_set_vertex_buffers, true, false, true), 3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER, 3451 &vmw_cmd_dx_set_index_buffer, true, false, true), 3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS, 3453 &vmw_cmd_dx_set_rendertargets, true, false, true), 3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check, 3455 true, false, true), 3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE, 3457 &vmw_cmd_dx_cid_check, true, false, true), 3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, 3459 &vmw_cmd_dx_cid_check, true, false, true), 3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3461 true, false, true), 3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check, 3463 true, false, true), 3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3465 true, false, true), 3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3467 &vmw_cmd_dx_cid_check, true, false, true), 3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check, 3469 true, false, true), 3470 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check, 3471 true, false, true), 3472 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3473 true, false, true), 3474 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check, 3475 true, false, true), 3476 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3477 true, false, true), 3478 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check, 3479 true, false, true), 3480 
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
		    &vmw_cmd_dx_so_define, true, false, true),
};
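
/**
 * vmw_cmd_describe - Look up the name and size of an SVGA command
 *
 * @buf: Pointer to the start of the command.
 * @size: Out: Size in bytes of the command, including any header.
 * @cmd: Out: Human-readable name of the command, for debug output.
 *
 * Returns: true if the command id was recognized, false otherwise.
 */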
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}
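
/**
 * vmw_cmd_check - Verify a single command in the command stream
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context of the current submission.
 * @buf: Pointer to the start of the command.
 * @size: In: Bytes remaining in the command stream. Out: Size in bytes of
 * the command that was checked.
 *
 * Looks the command up in the command table and calls its verifier, after
 * checking that the command fits in the remaining stream, is allowed from
 * user-space and matches the device's guest-backed object capability.
 *
 * Returns: Zero on success, negative error code on failure.
 */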
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands. */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
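
/**
 * vmw_cmd_check_all - Verify all commands in a command batch
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context of the current submission.
 * @buf: Pointer to the start of the command batch.
 * @size: Size in bytes of the command batch.
 *
 * Walks the batch, verifying each command in turn with vmw_cmd_check().
 *
 * Returns: Zero on success, negative error code on failure.
 */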
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}
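
/**
 * vmw_apply_relocations - Patch buffer object addresses into the command batch
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each relocation recorded during command verification, write the
 * now-known VRAM offset, GMR id or MOB id of the validated buffer object into
 * the command batch, then drop the relocation list.
 */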
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
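
/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: Minimum required size in bytes.
 *
 * Grows the bounce buffer geometrically (roughly 1.5x per step, page aligned)
 * until it can hold @size bytes. The old contents are not preserved.
 *
 * Returns: Zero on success, -ENOMEM if allocation fails.
 */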
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
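
/*
 * A worked example of the growth policy above, assuming the usual 32 KiB
 * VMWGFX_CMD_BOUNCE_INIT_SIZE and 4 KiB pages: a 100 KiB batch grows the
 * bounce buffer 32K -> 48K -> 72K -> 108K, i.e. each step is
 * PAGE_ALIGN(size + size / 2).
 */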

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: The calling file. Must be non-NULL if @p_handle is non-NULL.
 * @dev_priv: Pointer to a device private structure.
 * @p_fence: Out: The created fence object.
 * @p_handle: If non-NULL, a user-space handle for the fence is created and
 * returned here; otherwise no handle is created.
 *
 * Creates a fence object and submits a command stream marker. If this fails
 * for some reason, we sync the fifo and return NULL. It is then safe to fence
 * buffers with a NULL pointer.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is left untouched; if
 * user-space has preloaded it with -EFAULT, the failed copy is still
 * detectable there.
 *
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the user-space
 * reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference
	 * the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}
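
/*
 * Illustrative sketch of the user-space side of the error convention used by
 * vmw_execbuf_copy_fence_user() above; not part of this driver. A caller
 * would typically preload the error member before submitting:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	arg.fence_rep = (unsigned long) &rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error != 0)
 *		goto submission_failed;
 *
 * If the kernel's copy_to_user() fails, rep.error keeps its preloaded
 * -EFAULT value, so the failure remains detectable.
 */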

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already-present kernel copy of the
 * commands, or NULL. If non-NULL, or if the command buffer manager cannot be
 * used, this value is returned unchanged and *@header is set to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return an error pointer. If
 * the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to an error pointer.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

/**
 * vmw_execbuf_tie_context - Look up and validate the DX context, if any, to
 * submit against.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context of the current submission.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 *
 * Looks up the context resource, adds it to the validation list and records
 * it as the submission's DX context node.
 *
 * Returns: Zero on success, negative error code on failure.
 */
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}
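
/**
 * vmw_execbuf_process - Process a command batch for submission to the device
 *
 * @file_priv: The calling file, or NULL for kernel-internal submissions.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands, or NULL.
 * @kernel_commands: Kernel pointer to the commands, or NULL.
 * @command_size: Size in bytes of the command batch.
 * @throttle_us: Ignored; throttling is no longer supported.
 * @dx_context_handle: Handle of the DX context to submit against, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to copy fence information to, or NULL.
 * @out_fence: If non-NULL, the created fence object is returned here.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * Copies in the command batch if needed, verifies and patches it, reserves
 * and validates all resources and buffer objects it references, submits it to
 * the device and fences the submission.
 *
 * Returns: Zero on success, negative error code on failure.
 */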
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		VMW_DEBUG_USER("Throttling is no longer supported.\n");
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() will sync. The error will be
	 * propagated to user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {

		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_execbuf_ioctl - The DRM_VMW_EXECBUF ioctl entry point.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_execbuf_arg.
 * @file_priv: The calling file.
 *
 * Validates the ioctl argument, waits on an optional imported fence fd and
 * hands the command batch over to vmw_execbuf_process().
 *
 * Returns: Zero on success, negative error code on failure.
 */
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM would have correctly copied it. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}