// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10
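/**
 * vmw_resource_reference - Take a reference on a resource
 *
 * @res: The resource to reference.
 *
 * Returns @res with its refcount bumped. The reference is dropped again
 * with vmw_resource_unreference().
 */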
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

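/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless its refcount has already dropped to zero
 *
 * @res: The resource to reference.
 *
 * Returns @res on success, or NULL if the resource is already on its way
 * to destruction.
 */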
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

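/**
 * vmw_resource_unreference - Drop a reference on a resource
 *
 * @res: Double pointer to the resource. The pointer pointed to is set to
 *       NULL before the reference is dropped, so the caller cannot reuse
 *       it afterwards.
 */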
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

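/*
 * Illustrative usage sketch (not a driver entry point): a caller that
 * needs to hold on to a resource temporarily pairs the helpers above:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... use tmp while the extra reference is held ...
 *
 *	vmw_resource_unreference(&tmp);
 *
 * Since vmw_resource_unreference() clears the caller's pointer before
 * dropping the reference, @tmp cannot be used after the call.
 */
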
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	/*
	 * Preload with GFP_KERNEL outside the spinlock so that the
	 * GFP_NOWAIT allocation under the lock is unlikely to fail.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
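
/*
 * Illustrative sketch (hypothetical resource type; my_res, my_res_free and
 * my_res_func are stand-ins, not driver symbols): a resource implementation
 * embeds struct vmw_resource and initializes it with its own destructor and
 * function table, deferring id allocation until the first validation:
 *
 *	ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *				my_res_free, &my_res_func);
 *	if (ret)
 *		return ret;
 */
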
/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

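/*
 * Illustrative sketch of the lookup contract (hypothetical caller): on
 * success the returned resource is refcounted and must be released with
 * vmw_resource_unreference():
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	... use res ...
 *	vmw_resource_unreference(&res);
 */
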
/**
 * vmw_user_resource_noref_lookup_handle - look up a struct vmw_resource from
 * a TTM user-space handle and perform basic type checks, without taking
 * a reference
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 * No reference is taken on the returned resource, so on success the lookup
 * must be balanced by a matching noref release once the caller is done with
 * the resource.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

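/*
 * Illustrative sketch of the noref contract (hypothetical caller): the
 * return value is an ERR_PTR on failure and carries no reference on
 * success:
 *
 *	struct vmw_resource *res;
 *
 *	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
 *						    converter);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	... use res without stashing a long-term pointer to it ...
 */
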
/**
 * vmw_user_lookup_handle - look up either a surface or a buffer object from
 * a TTM user-space handle
 *
 * @dev_priv:   Pointer to a device private struct.
 * @tfile:      Pointer to a struct ttm_object_file identifying the caller.
 * @handle:     The TTM user-space handle.
 * @out_surf:   On success, returns the surface if the handle references one.
 * @out_buf:    On success, returns the buffer object otherwise.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	/* Round the backup size up to a whole number of pages. */
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_bo_bo_free);
	/* On error, vmw_bo_init() has already released the buffer object. */
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed while
 *                  interruptible.
 * @no_backup:      Whether to skip backup buffer allocation even for
 *                  resource types that normally need one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

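/*
 * Illustrative pairing sketch (hypothetical caller): a resource is
 * typically reserved around command submission and unreserved afterwards,
 * which puts it back on the eviction LRU list:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	... submit commands referencing res ...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */
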
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backing MOB
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, res->res_dirty, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else {
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid starvation due to thrashing, or as part of the hibernation
 * sequence, try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Block waiting for backup buffer locks, using a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid starvation due to thrashing, or as part of the hibernation
 * sequence, evict all evictable resources. In particular this means that
 * all guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to wait interruptible
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

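/*
 * Illustrative pairing sketch (hypothetical caller): pin references nest,
 * and each successful vmw_resource_pin() is balanced by a
 * vmw_resource_unpin():
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... res keeps its id and cannot be evicted here ...
 *	vmw_resource_unpin(res);
 */
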
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}