// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

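/**
 * vmw_resource_reference - Return a new reference to a resource
 * @res: The resource to reference.
 *
 * Return: @res, with its reference count incremented.
 */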
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

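/**
 * vmw_resource_reference_unless_doomed - Return a new reference to a resource
 * unless its reference count has already dropped to zero
 * @res: The resource to reference.
 *
 * Return: @res on success, NULL if the resource is already being destroyed.
 */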
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

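/**
 * vmw_resource_release - Final release of a resource
 * @kref: The embedded struct kref of the resource being released.
 *
 * Called when the last reference is dropped: unbinds and detaches any
 * backup buffer, destroys the hardware resource, releases the resource
 * id and finally frees the resource itself.
 */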
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

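/**
 * vmw_resource_unreference - Drop a reference to a resource
 * @p_res: Pointer to the resource pointer; cleared before the reference
 * is dropped.
 */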
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 *
 * Return: Zero on success, negative error code on failure.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}


/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned. If it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/*
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.base.size < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible, false,
			      &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether to perform waits interruptibly.
 * @no_backup:      If true, don't allocate a backup buffer even for
 *                  guest-backed resources that need one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptibly if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
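	/*
	 * Repeatedly try to validate. On -EBUSY, evict the least recently
	 * used resource of this type and retry, giving up after
	 * VMW_RES_EVICT_ERR_COUNT failed evictions or when interrupted.
	 */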
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}


/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backup mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes the binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;


	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;


	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);

}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to perform waits interruptibly.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

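	/*
	 * On the first pin, make the backup buffer (if any) resident and
	 * pinned, then validate the resource itself.
	 */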
	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_excl_fence(bo->base.resv));
	}

	return 0;
}