// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where having the
 * dx_ctx_node set is mandatory; a command parsed without it is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})

#define VMW_DECLARE_CMD_VAR(__var, __type)				\
	struct {							\
		SVGA3dCmdHeader header;					\
		__type body;						\
	} __var

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};
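
/*
 * Illustrative sketch (not part of the driver; SVGA3dCmdFoo is a
 * placeholder): a typical verifier recovers its full command from the
 * dispatcher-supplied header via VMW_DECLARE_CMD_VAR:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdFoo) =
 *		container_of(header, typeof(*cmd), header);
 *
 * This declares cmd as a pointer to an anonymous struct { SVGA3dCmdHeader
 * header; SVGA3dCmdFoo body; }, so cmd->body overlays the bytes immediately
 * following the header in the command stream.
 */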
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: The verifier function for the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}
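
/*
 * Illustrative sketch: vmw_ptr_diff() is what verifiers use to record where
 * in the submitted stream a resource id lives, so it can be patched later.
 * Assuming id_loc points at the id inside the command being parsed:
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       id_loc),
 *					  vmw_res_rel_normal);
 *
 * The offset is in bytes from the start of the command buffer, matching the
 * byte granularity of struct vmw_resource_relocation::offset.
 */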
/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}
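
/*
 * Illustrative note: the cache above makes repeated validation of the same
 * resource cheap. A hypothetical caller of the validation-list add helpers
 * below doing
 *
 *	vmw_execbuf_res_noctx_val_add(sw_context, res, VMW_RES_DIRTY_SET);
 *	vmw_execbuf_res_noctx_val_add(sw_context, res, VMW_RES_DIRTY_SET);
 *
 * pays for vmw_validation_add_resource() only once; the second call hits
 * rcache and merely updates the dirty state.
 */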
/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}
/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}
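
/*
 * Illustrative sketch (hypothetical command body; vmw_view_sr is the
 * shader-resource view type): a DX verifier referencing a view by id would
 * typically do
 *
 *	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
 *				   cmd->body.viewId);
 *	if (IS_ERR(view))
 *		return PTR_ERR(view);
 *
 * which validates both the view and its backing surface in one call.
 */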
/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
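
/*
 * Illustrative note on relocation patching: after validation, the execbuf
 * path runs vmw_resource_relocations_apply() over the copied command stream.
 * A conditional-NOP entry recorded at the offset of a command header's id,
 *
 *	vmw_resource_relocation_add(sw_context, res, offset,
 *				    vmw_res_rel_cond_nop);
 *
 * leaves the command intact when the resource validated to a real id, but
 * overwrites the header id with SVGA_3D_CMD_NOP when res->id is still -1.
 */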
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}
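
/*
 * Illustrative note: the staged binding bookkeeping above is what later
 * makes vmw_rebind_contexts() work. Each
 *
 *	binding.bi.ctx = ctx_node->ctx;
 *	binding.bi.res = view;
 *	binding.bi.bt = binding_type;
 *	vmw_binding_add(ctx_node->staged, &binding.bi, shader_slot, slot);
 *
 * records enough information (context, resource, binding type, slot) for
 * vmw_binding_rebind_all() to re-emit scrubbed binding commands after the
 * resources have been revalidated and possibly moved.
 */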
/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence be the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If
 * so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
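
/*
 * Illustrative flow (sketch): query verifiers call the prepare function
 * above while the stream is parsed, and the execbuf path pairs it with
 * vmw_query_bo_switch_commit() (below) once the batch has been submitted:
 *
 *	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 *	...	(submit batch)
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *
 * Note the 4-page cap on query buffers enforced above; larger buffers are
 * rejected with -EINVAL before any pinning state is touched.
 */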
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
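
/*
 * Illustrative note on deferred translation: both helpers above only record
 * a struct vmw_relocation; the actual patching happens later, conceptually:
 *
 *	list_for_each_entry(reloc, &sw_context->bo_relocations, head)
 *		*reloc->mob_loc = reloc->vbo->base.mem.start;
 *
 * (sketch only - the real fixup in vmw_apply_relocations() also handles the
 * SVGAGuestPtr variant via reloc->location, depending on where the buffer
 * object was placed.)
 */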
/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
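
/*
 * Illustrative note: the legacy-to-guest-backed rewrites above patch the
 * command in place, which is only safe because the two layouts are the same
 * size; that invariant is what the
 *
 *	BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
 *
 * checks assert, so the in-stream header->size stays valid after memcpy().
 */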
/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
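
/*
 * Illustrative note: vmw_cmd_dma() clamps rather than rejects an oversized
 * transfer. Given a BO of bo_size bytes and a start offset inside it,
 *
 *	bo_size -= cmd->body.guest.ptr.offset;
 *	if (suffix->maximumOffset > bo_size)
 *		suffix->maximumOffset = bo_size;
 *
 * silently shrinks the DMA window to the buffer end; only an offset that
 * starts beyond the buffer is treated as an error.
 */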
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: The resource whose backup buffer is switched.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}
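
/*
 * Illustrative usage (sketch): vmw_cmd_switch_backup() below combines the
 * resource lookup and the backup switch, so a guest-backed bind verifier
 * reduces to a single call, e.g. for SVGA_3D_CMD_BIND_GB_SURFACE:
 *
 *	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
 *				     user_surface_converter, &cmd->body.sid,
 *				     &cmd->body.mobid, 0);
 */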
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
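/*
 * Note (illustrative): the dirty argument passed to vmw_cmd_res_check()
 * above encodes how each command touches the surface. The whole-surface
 * readback and invalidate validators pass VMW_RES_DIRTY_CLEAR while the
 * per-image variants pass VMW_RES_DIRTY_NONE, so only full-surface
 * operations clear pending dirty tracking. A hypothetical validator for
 * a command that writes the surface would instead pass VMW_RES_DIRTY_SET:
 *
 *	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 *				 VMW_RES_DIRTY_SET, user_surface_converter,
 *				 &cmd->body.sid, NULL);
 */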
/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - per-device guest-backed
		 * shaders, but user-space thinks they are per-context
		 * host-backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}
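/*
 * Example (illustrative): the binding-tracking pattern shared by
 * vmw_cmd_set_shader() above and the DX validators below - fill in a
 * struct vmw_ctx_bindinfo_* and record it in the context's staged binding
 * state, to be committed or discarded together with the batch:
 *
 *	binding.bi.ctx = ctx;
 *	binding.bi.res = res;
 *	binding.bi.bt = vmw_ctx_binding_shader;
 *	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
 *	vmw_binding_add(ctx_info->staged, &binding.bi,
 *			binding.shader_slot, 0);
 *
 * vmw_execbuf_bindings_commit() later commits the staged state, or drops
 * it on the error path.
 */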
/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
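/*
 * Note (illustrative): validators for variable-sized DX commands derive
 * the trailing element count from the header and bound-check it in u64
 * arithmetic, so a huge startBuffer/startView value cannot overflow the
 * comparison, e.g.:
 *
 *	num = (cmd->header.size - sizeof(cmd->body)) /
 *		sizeof(SVGA3dVertexBuffer);
 *	if ((u64)num + (u64)cmd->body.startBuffer >
 *	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS)
 *		return -EINVAL;
 *
 * as done by vmw_cmd_dx_set_vertex_buffers() above.
 */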
/**
 * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
					   cmd->body.renderTargetViewId));
}
/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
					   cmd->body.depthStencilViewId));
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
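/*
 * Note (illustrative): view definition is a two-step pattern - first make
 * sure the context's cotable can hold the new id, then register the view
 * in the software state so later destroy commands and bindings can find
 * it:
 *
 *	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[vt]);
 *	ret = vmw_cotable_notify(res, cmd->defined_id);
 *	if (ret)
 *		return ret;
 *	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, vt,
 *			    cmd->defined_id, header, cmd_size,
 *			    &sw_context->staged_cmd_res);
 *
 * where vt is the view type decoded from the command id, as in
 * vmw_cmd_dx_view_define() above.
 */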
/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}
/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
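/*
 * Note (illustrative): the union-plus-BUILD_BUG_ON construction in
 * vmw_cmd_dx_check_subresource() above generalizes to any group of
 * command bodies that share a leading member; the compile-time checks
 * prove the shared alias really overlays that member in every variant:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		union {
 *			SVGA3dCmdDXReadbackSubResource r_body;
 *			SVGA3dSurfaceId sid;
 *		};
 *	} *cmd;
 *	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
 *		     offsetof(typeof(*cmd), sid));
 *
 * after which one validator can service several command ids through
 * cmd->sid alone.
 */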
/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}
/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
					   cmd->body.shaderResourceViewId));
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
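/*
 * Note (illustrative): the table below is a sparse array of verifiers
 * indexed by (command id - SVGA_3D_CMD_BASE) through designated
 * initializers. Each VMW_CMD_DEF() expands roughly to (SVGA_3D_CMD_FOO
 * and vmw_cmd_foo are placeholder names):
 *
 *	[(SVGA_3D_CMD_FOO) - SVGA_3D_CMD_BASE] = {
 *		&vmw_cmd_foo, true, false, true, "SVGA_3D_CMD_FOO"
 *	},
 *
 * so vmw_cmd_check() dispatches with a single array lookup, and ids
 * without an entry are rejected by its !entry->func test.
 */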
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),
};

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}
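/*
 * Example (illustrative): vmw_cmd_describe() above is intended for debug
 * code that walks a raw command stream, roughly:
 *
 *	const char *name;
 *	u32 size;
 *
 *	if (vmw_cmd_describe(buf, &size, &name))
 *		pr_debug("cmd %s, %u bytes\n", name, size);
 *	buf += size;
 */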
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
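/*
 * Note (illustrative): vmw_resize_cmd_bounce() grows the bounce buffer
 * geometrically (size += size / 2, page-aligned) until it covers the
 * request. Assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB, a 100 KiB
 * batch would grow the buffer 32K -> 48K -> 72K -> 108K, so repeated
 * submissions of a similar size reallocate rarely.
 */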
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd, struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
					  TTM_REF_USAGE);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
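/*
 * Note (illustrative, user-space side): as the comment above suggests,
 * callers typically preload the fence_rep error member so that a failed
 * copy_to_user() in the kernel remains detectable. A hypothetical sketch
 * (only drm_vmw_fence_rep::error is taken from the ABI, the rest is made
 * up):
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;
 *	pass &rep as the fence_rep pointer in the execbuf argument;
 *	if rep.error is still -EFAULT after the ioctl returns success,
 *	the kernel's copy-out failed and no usable fence was returned.
 */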
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the
 * fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed
 * to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

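/*
 * Note on the two submission paths above: vmw_execbuf_submit_fifo() copies
 * the batch into reserved FIFO space and patches that copy, while the
 * command-buffer path patches the batch in place, since the buffer returned
 * by vmw_cmdbuf_reserve() for a preallocated header already holds the
 * copied-in user commands (hence no memcpy there). Which path is taken is
 * decided by whether a cmdbuf header was allocated for this submission.
 */
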
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to an already copied-in command
 * batch, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function returns the value of
 * @kernel_commands as passed in. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

/**
 * vmw_execbuf_tie_context - Look up and validate the DX context, if any, for
 * this submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The command submission context.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 */
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}

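/*
 * Main entry point for command submission. In rough outline, the function
 * below proceeds through these phases:
 *
 *  1. Obtain a kernel copy of the batch (cmdbuf manager or bounce buffer).
 *  2. Verify and patch every command (vmw_cmd_check_all), collecting
 *     resource and buffer-object relocations along the way.
 *  3. Reserve and validate all referenced resources and buffer objects.
 *  4. Apply the relocations and hand the batch to the device (FIFO or
 *     command buffer manager).
 *  5. Create a fence, commit binding state and report back to user-space.
 *
 * Errors before submission unwind through the out_* labels in reverse order.
 */
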
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;

		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

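	/*
	 * Past this point the batch has been handed to the device, so errors
	 * can no longer back the submission off; failures below degrade to
	 * synchronizing instead of unwinding.
	 */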
Syncing.\n"); 3783 3784 vmw_execbuf_bindings_commit(sw_context, false); 3785 vmw_bind_dx_query_mob(sw_context); 3786 vmw_validation_res_unreserve(&val_ctx, false); 3787 3788 vmw_validation_bo_fence(sw_context->ctx, fence); 3789 3790 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) 3791 __vmw_execbuf_release_pinned_bo(dev_priv, fence); 3792 3793 /* 3794 * If anything fails here, give up trying to export the fence and do a 3795 * sync since the user mode will not be able to sync the fence itself. 3796 * This ensures we are still functionally correct. 3797 */ 3798 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { 3799 3800 sync_file = sync_file_create(&fence->base); 3801 if (!sync_file) { 3802 VMW_DEBUG_USER("Sync file create failed for fence\n"); 3803 put_unused_fd(out_fence_fd); 3804 out_fence_fd = -1; 3805 3806 (void) vmw_fence_obj_wait(fence, false, false, 3807 VMW_FENCE_WAIT_TIMEOUT); 3808 } else { 3809 /* Link the fence with the FD created earlier */ 3810 fd_install(out_fence_fd, sync_file->file); 3811 } 3812 } 3813 3814 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, 3815 user_fence_rep, fence, handle, out_fence_fd, 3816 sync_file); 3817 3818 /* Don't unreference when handing fence out */ 3819 if (unlikely(out_fence != NULL)) { 3820 *out_fence = fence; 3821 fence = NULL; 3822 } else if (likely(fence != NULL)) { 3823 vmw_fence_obj_unreference(&fence); 3824 } 3825 3826 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); 3827 mutex_unlock(&dev_priv->cmdbuf_mutex); 3828 3829 /* 3830 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks 3831 * in resource destruction paths. 3832 */ 3833 vmw_validation_unref_lists(&val_ctx); 3834 3835 return 0; 3836 3837 out_unlock_binding: 3838 mutex_unlock(&dev_priv->binding_mutex); 3839 out_err: 3840 vmw_validation_bo_backoff(&val_ctx); 3841 out_err_nores: 3842 vmw_execbuf_bindings_commit(sw_context, true); 3843 vmw_validation_res_unreserve(&val_ctx, true); 3844 vmw_resource_relocations_free(&sw_context->res_relocations); 3845 vmw_free_relocations(sw_context); 3846 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) 3847 __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 3848 out_unlock: 3849 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); 3850 vmw_validation_drop_ht(&val_ctx); 3851 WARN_ON(!list_empty(&sw_context->ctx_list)); 3852 mutex_unlock(&dev_priv->cmdbuf_mutex); 3853 3854 /* 3855 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks 3856 * in resource destruction paths. 3857 */ 3858 vmw_validation_unref_lists(&val_ctx); 3859 out_free_header: 3860 if (header) 3861 vmw_cmdbuf_header_free(header); 3862 out_free_fence_fd: 3863 if (out_fence_fd >= 0) 3864 put_unused_fd(out_fence_fd); 3865 3866 return ret; 3867 } 3868 3869 /** 3870 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. 3871 * 3872 * @dev_priv: The device private structure. 3873 * 3874 * This function is called to idle the fifo and unpin the query buffer if the 3875 * normal way to do this hits an error, which should typically be extremely 3876 * rare. 3877 */ 3878 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) 3879 { 3880 VMW_DEBUG_USER("Can't unpin query buffer. 
Trying to recover.\n"); 3881 3882 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); 3883 vmw_bo_pin_reserved(dev_priv->pinned_bo, false); 3884 if (dev_priv->dummy_query_bo_pinned) { 3885 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); 3886 dev_priv->dummy_query_bo_pinned = false; 3887 } 3888 } 3889 3890 3891 /** 3892 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query 3893 * bo. 3894 * 3895 * @dev_priv: The device private structure. 3896 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a 3897 * query barrier that flushes all queries touching the current buffer pointed to 3898 * by @dev_priv->pinned_bo 3899 * 3900 * This function should be used to unpin the pinned query bo, or as a query 3901 * barrier when we need to make sure that all queries have finished before the 3902 * next fifo command. (For example on hardware context destructions where the 3903 * hardware may otherwise leak unfinished queries). 3904 * 3905 * This function does not return any failure codes, but make attempts to do safe 3906 * unpinning in case of errors. 3907 * 3908 * The function will synchronize on the previous query barrier, and will thus 3909 * not finish until that barrier has executed. 3910 * 3911 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before 3912 * calling this function. 3913 */ 3914 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 3915 struct vmw_fence_obj *fence) 3916 { 3917 int ret = 0; 3918 struct vmw_fence_obj *lfence = NULL; 3919 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 3920 3921 if (dev_priv->pinned_bo == NULL) 3922 goto out_unlock; 3923 3924 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, 3925 false); 3926 if (ret) 3927 goto out_no_reserve; 3928 3929 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false, 3930 false); 3931 if (ret) 3932 goto out_no_reserve; 3933 3934 ret = vmw_validation_bo_reserve(&val_ctx, false); 3935 if (ret) 3936 goto out_no_reserve; 3937 3938 if (dev_priv->query_cid_valid) { 3939 BUG_ON(fence != NULL); 3940 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); 3941 if (ret) 3942 goto out_no_emit; 3943 dev_priv->query_cid_valid = false; 3944 } 3945 3946 vmw_bo_pin_reserved(dev_priv->pinned_bo, false); 3947 if (dev_priv->dummy_query_bo_pinned) { 3948 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); 3949 dev_priv->dummy_query_bo_pinned = false; 3950 } 3951 if (fence == NULL) { 3952 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, 3953 NULL); 3954 fence = lfence; 3955 } 3956 vmw_validation_bo_fence(&val_ctx, fence); 3957 if (lfence != NULL) 3958 vmw_fence_obj_unreference(&lfence); 3959 3960 vmw_validation_unref_lists(&val_ctx); 3961 vmw_bo_unreference(&dev_priv->pinned_bo); 3962 3963 out_unlock: 3964 return; 3965 out_no_emit: 3966 vmw_validation_bo_backoff(&val_ctx); 3967 out_no_reserve: 3968 vmw_validation_unref_lists(&val_ctx); 3969 vmw_execbuf_unpin_panic(dev_priv); 3970 vmw_bo_unreference(&dev_priv->pinned_bo); 3971 } 3972 3973 /** 3974 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo. 3975 * 3976 * @dev_priv: The device private structure. 3977 * 3978 * This function should be used to unpin the pinned query bo, or as a query 3979 * barrier when we need to make sure that all queries have finished before the 3980 * next fifo command. (For example on hardware context destructions where the 3981 * hardware may otherwise leak unfinished queries). 
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts at safe
 * unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM will have correctly copied it. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}

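/*
 * Illustrative user-space flow for the ioctl above (a rough sketch only; the
 * arg members are from the vmwgfx uapi, and the ioctl request macro is
 * assumed to be built the usual DRM way by the user-mode driver):
 *
 *	struct drm_vmw_execbuf_arg arg = {0};
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.commands = (unsigned long) cmds;
 *	arg.command_size = size;
 *	arg.fence_rep = (unsigned long) &rep;
 *	arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
 *	ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 *	// On success, rep.error is 0 and rep.fd holds the exported sync-file
 *	// fd for this submission's fence.
 */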