xref: /openbmc/linux/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c (revision d9a07577b8a3131c90c187fb2b89662bee535cfd)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;
	list_add_tail(&res->mob_head, &backup->res_list);
	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		list_del_init(&res->mob_head);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - Release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->used_prio = 3;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
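
/*
 * Illustrative sketch (not part of this file): a resource-type
 * implementation typically embeds struct vmw_resource, supplies a
 * struct vmw_res_func table and a destructor, and calls
 * vmw_resource_init() from its own init path. The names vmw_foo,
 * vmw_foo_func and vmw_foo_free below are hypothetical.
 *
 *	struct vmw_foo {
 *		struct vmw_resource res;
 *		// type-specific state ...
 *	};
 *
 *	static void vmw_foo_free(struct vmw_resource *res)
 *	{
 *		kfree(container_of(res, struct vmw_foo, res));
 *	}
 *
 *	static int vmw_foo_init(struct vmw_private *dev_priv,
 *				struct vmw_foo *foo)
 *	{
 *		// Defer device id allocation until first validation.
 *		return vmw_resource_init(dev_priv, &foo->res, true,
 *					 vmw_foo_free, &vmw_foo_func);
 *	}
 */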

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
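
/*
 * Illustrative usage sketch (assumed caller, not taken from this file):
 * the resource returned on success holds a reference that must be
 * dropped with vmw_resource_unreference() when the caller is done.
 * dev_priv, tfile and handle are assumed to be valid here.
 *
 *	struct vmw_resource *res = NULL;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	// ... use res ...
 *	vmw_resource_unreference(&res);
 */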

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle without acquiring a resource reference
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Returns a pointer to the looked-up resource without bumping its
 * refcount. If the handle can't be found, ERR_PTR(-ESRCH) is returned;
 * if it is associated with an incorrect resource type, ERR_PTR(-EINVAL)
 * is returned. On success, the caller is responsible for releasing the
 * underlying base-object lookup with ttm_base_object_noref_release().
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
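
/*
 * Illustrative usage sketch (an assumption about the caller, not taken
 * from this file): the pointer returned by the noref lookup is only
 * valid until the lookup is released again, so the pattern is lookup,
 * brief use, release.
 *
 *	struct vmw_resource *res;
 *
 *	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
 *						    user_surface_converter);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	// ... short, non-blocking use of res ...
 *	ttm_base_object_noref_release();
 */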

/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @out_surf:     On success, and if the handle names a surface, points to a
 *                refcounted struct vmw_surface. Must point to NULL on entry.
 * @out_buf:      On success, and if the handle names a buffer object, points
 *                to a refcounted struct vmw_buffer_object. Must point to
 *                NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps during reservation should be
 *                  performed while interruptible.
 * @no_backup:      If true, skip allocating a backup buffer even for
 *                  guest-backed resources.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
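
/*
 * Simplified usage sketch (an assumption modeled loosely on
 * vmw_resource_pin() below, not a complete recipe): validation is
 * bracketed by reserve and unreserve, which keeps the resource off the
 * LRU list while it is in use. Real callers must also reserve and
 * validate the backup buffer itself; that step is elided here.
 *
 *	int ret;
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_resource_validate(res, true);
 *	// ... submit commands referencing res ...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */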

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backing MOB
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, res->res_dirty, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else {
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed while interruptible
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
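
/*
 * Illustrative usage sketch (assumed caller, not taken from this file):
 * pin references nest, so the resource stays resident and keeps its
 * device id from a successful vmw_resource_pin() until the matching
 * vmw_resource_unpin().
 *
 *	int ret;
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	// ... resource is guaranteed resident with a stable id here ...
 *	vmw_resource_unpin(res);
 */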

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}