// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate as MOB.
 * @cpu_blit: Validate for CPU blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non-ref-counted pointer to the new backup buffer to be
 * assigned to a resource.
 * @new_backup_offset: Offset into the new backup MOB for resources that can
 * share MOBs.
 * @no_buffer_needed: The kernel does not need to allocate a MOB during
 * validation; the command stream provides a MOB bind operation.
 * @switching_backup: The validation process is switching backup MOBs.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @private: Optional additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	unsigned long private[];
};
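
/*
 * Usage sketch (illustrative only): the trailing @private flexible array
 * provides caller-private storage, sized via the @priv_size argument to
 * vmw_validation_add_resource() and returned through its @p_node argument.
 * The struct below is hypothetical, not part of this driver:
 *
 *	struct my_priv { u32 cmd_offset; };
 *	void *p;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(struct my_priv),
 *					  &p, NULL);
 *	if (!ret)
 *		((struct my_priv *)p)->cmd_offset = 0;
 */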

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
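
/*
 * Example (illustrative only): allocations are bump-allocated from
 * per-context pages and are never freed individually; everything is
 * reclaimed in bulk by vmw_validation_unref_lists(), vmw_validation_revert()
 * or vmw_validation_done(). A hypothetical caller-side type is assumed:
 *
 *	struct my_data *data;
 *
 *	data = vmw_validation_mem_alloc(ctx, sizeof(*data));
 *	if (!data)
 *		return -ENOMEM;
 *	(@data stays valid until the validation context is torn down)
 */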
136 
137 /**
138  * vmw_validation_mem_free - Free all memory allocated using
139  * vmw_validation_mem_alloc()
140  * @ctx: The validation context
141  *
142  * All memory previously allocated for this context using
143  * vmw_validation_mem_alloc() is freed.
144  */
145 static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
146 {
147 	struct page *entry, *next;
148 
149 	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
150 		list_del_init(&entry->lru);
151 		__free_page(entry);
152 	}
153 
154 	ctx->mem_size_left = 0;
155 	if (ctx->vm && ctx->total_mem) {
156 		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
157 		ctx->total_mem = 0;
158 		ctx->vm_size_left = 0;
159 	}
160 }

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as MOB, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
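
/*
 * Usage sketch (illustrative only): duplicate registrations of the same
 * buffer object are merged into one entry, but the usage flags must then
 * agree:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	if (ret)
 *		return ret;
 *	(a second call for @vbo with @as_mob false would return -EINVAL)
 */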

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of the additional, caller-private metadata.
 * @p_node: Output pointer to the caller-private metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		DRM_ERROR("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		/* Contexts go first on the context list; cotables go last. */
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
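
/*
 * Usage sketch (illustrative only): @first_usage lets the caller perform
 * one-time setup when a resource first enters the context; later duplicate
 * registrations are merged:
 *
 *	bool first;
 *
 *	ret = vmw_validation_add_resource(ctx, res, 0, NULL, &first);
 *	if (ret)
 *		return ret;
 *	if (first)
 *		(perform one-time setup for @res here)
 */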

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional metadata pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}
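
/*
 * Usage sketch (illustrative only): @val_private is the pointer obtained
 * through the @p_node argument of vmw_validation_add_resource(), and @vbo
 * must itself already be registered with vmw_validation_add_bo():
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	if (ret)
 *		return ret;
 *	vmw_validation_res_switch_backup(ctx, val_private, vbo, 0);
 */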

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo(ctx, vbo,
						    vmw_resource_needs_backup(res),
						    false);
			if (ret)
				goto out_unreserve;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		if (val->reserved)
			vmw_resource_unreserve(val->res,
					       !backoff &&
					       val->switching_backup,
					       val->new_backup,
					       val->new_backup_offset);
	}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptibly if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptibly if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->cpu_blit) {
			struct ttm_operation_ctx ttm_ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement,
					      &ttm_ctx);
		} else {
			ret = vmw_validation_bo_validate_single(entry->base.bo,
								intr,
								entry->as_mob);
		}
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptibly if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo(ctx, vbo,
						    vmw_resource_needs_backup(res),
						    false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that can cause deadlocks during resource
 * unreferencing if held. After resource and buffer object registration
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}
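
/*
 * Usage sketch (illustrative only): once all resources and buffer objects
 * have been added, the hash table is dead weight and can be dropped before
 * any potentially long-running operations:
 *
 *	ret = vmw_validation_prepare(ctx, mutex, true);
 *	if (ret)
 *		return ret;
 *	vmw_validation_drop_ht(ctx);
 *	(submit commands, then vmw_validation_done() or
 *	 vmw_validation_revert())
 */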

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptibly if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
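
/*
 * Usage sketch (illustrative only), assuming the DECLARE_VAL_CONTEXT()
 * initializer from vmwgfx_validation.h:
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	if (ret)
 *		goto out_unref;
 *	ret = vmw_validation_prepare(&val_ctx, NULL, true);
 *	if (ret)
 *		goto out_unref;
 *	(submit the command stream; on failure call vmw_validation_revert())
 *	vmw_validation_done(&val_ctx, fence);
 *	return 0;
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *	return ret;
 */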

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
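
/*
 * Example (illustrative only): the preload allocates node-sized space and
 * then hands it back to the pool, so the next vmw_validation_add_bo() is
 * served from an already-present page and therefore cannot sleep in the
 * allocator:
 *
 *	ret = vmw_validation_preload_bo(ctx);
 *	if (ret)
 *		return ret;
 *	(enter a section where sleeping is not allowed)
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	(leave the section)
 */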

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}