// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

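/**
 * vmw_resource_reference - Add a reference to a struct vmw_resource
 *
 * @res: The resource to reference.
 *
 * Returns @res, with its reference count incremented.
 */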
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

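/**
 * vmw_resource_reference_unless_doomed - Add a reference to a struct
 * vmw_resource, unless its reference count has already dropped to zero
 *
 * @res: The resource to reference.
 *
 * Returns @res on success, or NULL if the resource is already being
 * destroyed.
 */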
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

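/**
 * vmw_resource_release - Final release of a struct vmw_resource
 *
 * @kref: The embedded struct kref of the resource.
 *
 * Unbinds and releases any backup buffer, destroys the hardware
 * resource, removes the resource id and finally frees the resource
 * using the destructor given at init time, if any.
 */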
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

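	/*
	 * Kill any command stream bindings referencing this resource
	 * before asking the device to destroy it.
	 */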
	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

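/**
 * vmw_resource_unreference - Remove a reference from a struct vmw_resource
 *
 * @p_res: Double pointer to the resource. Set to NULL on return; the
 * resource itself is released when the last reference is dropped.
 */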
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - Mark a resource as available for lookup
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_lookup_handle - Helper function that looks up either a surface
 * or a buffer object from a user-space handle.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

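	/*
	 * Bind the resource to its backup buffer if the resource type
	 * needs one and it is not already bound, or if the type takes
	 * an optional buffer and one was supplied.
	 */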
	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

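	/*
	 * Reserve the backup buffer. Validation of its placement can be
	 * skipped if the resource is guest-backed but not currently
	 * bound to the buffer.
	 */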
	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed while
 *                  interruptible.
 * @no_backup:      Don't allocate a backup buffer even if the resource
 *                  type needs one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
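	/*
	 * On -EBUSY, evict the least recently used resource of the same
	 * type and retry until validation succeeds or the eviction
	 * error limit is reached.
	 */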
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list - Unbind all resources bound to a MOB
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.shared = false
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, true, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes that binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to sleep interruptibly while pinning
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

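	/*
	 * The first pin reference validates the backup buffer, if any,
	 * pins it, and then validates the resource itself.
	 */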
	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

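	/*
	 * With @no_backup set, vmw_resource_reserve() cannot fail;
	 * the WARN_ON below guards against that changing.
	 */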
	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}