// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: Whether the resource should be registered dirty at unreserve time.
 * Only valid when @dirty_set is set.
 * @dirty_set: Whether @dirty carries valid information.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[0];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}
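
/*
 * Illustrative sketch (not built): how a caller might draw per-node memory
 * from the context-based allocator above. The node type and function are
 * hypothetical. Note the absence of a matching per-allocation free; all
 * memory is reclaimed in bulk by vmw_validation_mem_free() via
 * vmw_validation_unref_lists(), vmw_validation_revert() or
 * vmw_validation_done().
 */
#if 0
struct example_node {
	struct list_head head;
	u32 id;
};

static int example_track_id(struct vmw_validation_context *ctx,
			    struct list_head *list, u32 id)
{
	/* Memory returned is zeroed and aligned to sizeof(long). */
	struct example_node *node =
		vmw_validation_mem_alloc(ctx, sizeof(*node));

	if (!node)
		return -ENOMEM;

	node->id = id;
	list_add_tail(&node->head, list);
	return 0;
}
#endif
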
/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
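
/*
 * Illustrative sketch (not built): registering a buffer object with the
 * context. The caller is hypothetical. Registering the same buffer again
 * with different as_mob/cpu_blit flags would hit the duplicate found by
 * vmw_validation_find_bo_dup() and fail with -EINVAL.
 */
#if 0
static int example_add_guest_bo(struct vmw_validation_context *ctx,
				struct vmw_buffer_object *vbo)
{
	/* Suitable for GMR operations; no CPU blit access needed. */
	return vmw_validation_add_bo(ctx, vbo, false, false);
}
#endif
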
/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}
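
/*
 * Illustrative sketch (not built): registering a resource together with
 * caller-private metadata. The private struct and caller are hypothetical.
 * @p_node returns the address of the node's trailing private area, which
 * also doubles as the cookie passed to vmw_validation_res_set_dirty() and
 * vmw_validation_res_switch_backup().
 */
#if 0
struct example_res_priv {
	u32 cmd_offset;
};

static int example_add_dirty_res(struct vmw_validation_context *ctx,
				 struct vmw_resource *res, u32 cmd_offset)
{
	struct example_res_priv *priv;
	void *node;
	int ret;

	/* The first_usage output is not needed here, so NULL is passed. */
	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
					  VMW_RES_DIRTY_SET, &node, NULL);
	if (ret)
		return ret;

	priv = node;
	priv->cmd_offset = cmd_offset;

	/* Equivalent late dirty registration using the same cookie: */
	vmw_validation_res_set_dirty(ctx, node, VMW_RES_DIRTY_SET);

	return 0;
}
#endif
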
/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}
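
/*
 * Illustrative sketch (not built): the reserve/commit/backoff pattern the
 * two functions above implement. The submit step is a hypothetical
 * placeholder. On failure, unreserving with backoff == true discards the
 * recorded dirty and backup-switch information instead of committing it.
 */
#if 0
static int example_reserve_and_commit(struct vmw_validation_context *ctx)
{
	int ret = vmw_validation_res_reserve(ctx, true);

	if (ret)
		return ret;

	ret = example_submit(ctx);	/* hypothetical */
	if (ret) {
		vmw_validation_res_unreserve(ctx, true);	/* backoff */
		return ret;
	}

	vmw_validation_res_unreserve(ctx, false);	/* commit */
	return 0;
}
#endif
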
/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->cpu_blit) {
			struct ttm_operation_ctx ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}
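
/*
 * Illustrative sketch (not built): the ordering the two validation passes
 * above require. Buffer objects, including resource backup buffers added
 * during vmw_validation_res_reserve(), must be validated before the
 * resources that use them.
 */
#if 0
static int example_validate_all(struct vmw_validation_context *ctx, bool intr)
{
	int ret = vmw_validation_bo_validate(ctx, intr);

	if (ret)
		return ret;

	return vmw_validation_res_validate(ctx, intr);
}
#endif
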
/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource and buffer object registration,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * object and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
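
/*
 * Illustrative sketch (not built): a complete submission flow built on
 * vmw_validation_prepare(). The command-submission step is a hypothetical
 * placeholder; vmw_validation_revert() and vmw_validation_done() are
 * defined below, and DECLARE_VAL_CONTEXT() comes from vmwgfx_validation.h.
 */
#if 0
static int example_submit_flow(struct vmw_private *dev_priv,
			       struct vmw_fence_obj *fence)
{
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	int ret;

	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
	if (ret)
		return ret;

	ret = example_submit_commands(dev_priv);	/* hypothetical */
	if (ret) {
		vmw_validation_revert(&val_ctx);
		return ret;
	}

	vmw_validation_done(&val_ctx, fence);
	return 0;
}
#endif
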
/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
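
/*
 * Illustrative sketch (not built): what the preload guarantee above is
 * for. After a successful preload, the following vmw_validation_add_bo()
 * will not allocate and therefore not sleep, so it may be called with a
 * spinlock held. The lock and caller are hypothetical.
 */
#if 0
static int example_add_bo_nonblocking(struct vmw_validation_context *ctx,
				      spinlock_t *lock,
				      struct vmw_buffer_object *vbo)
{
	int ret = vmw_validation_preload_bo(ctx);

	if (ret)
		return ret;

	spin_lock(lock);
	ret = vmw_validation_add_bo(ctx, vbo, false, false);
	spin_unlock(lock);

	return ret;
}
#endif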