/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

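/**
 * vmw_resource_reference - Take a reference on a resource
 *
 * @res: The resource to reference.
 *
 * Returns @res, with its refcount incremented by one.
 */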
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

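/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource
 * unless it is already on its way to destruction
 *
 * @res: The resource to reference.
 *
 * Returns @res with its refcount incremented, or NULL if the refcount
 * has already dropped to zero.
 */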
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

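/**
 * vmw_resource_release - Final release of a resource
 *
 * @kref: The struct kref embedded in the resource to release.
 *
 * Called with the resource lock held, and drops and re-takes that lock
 * around the parts that may sleep. Unbinds and unreferences any backup
 * buffer, destroys the hardware resource, removes the id from the idr
 * and finally frees the resource through @res_free, or kfree() if no
 * destructor was given.
 */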
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

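/**
 * vmw_resource_unreference - Drop a reference on a resource
 *
 * @p_res: Pointer to the resource pointer, which is set to NULL on return.
 *
 * The resource is freed through vmw_resource_release() when the last
 * reference is dropped.
 */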
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - make a resource visible to lookups
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that from this point on vmw_resource_lookup() will
 * be able to find the resource.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

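/**
 * vmw_resource_lookup - Look up a resource by device id
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr:      The idr in which to look up the resource.
 * @id:       The resource id.
 *
 * Returns a refcounted pointer to the resource, or NULL if it wasn't
 * found or hasn't been activated yet.
 */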
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	return res;
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_lookup_handle - look up either a surface or a dma buffer from
 * a TTM user-space handle
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

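/**
 * vmw_dmabuf_bo_free - TTM destroy callback for ordinary dma buffers
 *
 * @bo: The TTM buffer object embedded in a struct vmw_dma_buffer.
 */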
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

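/**
 * vmw_user_dmabuf_destroy - TTM destroy callback for user dma buffers
 *
 * @bo: The TTM buffer object embedded in a struct vmw_user_dma_buffer.
 */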
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

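/**
 * vmw_dmabuf_init - Initialize a struct vmw_dma_buffer as a TTM buffer object
 *
 * @dev_priv:      Pointer to a struct vmw_private identifying the device.
 * @vmw_bo:        The struct vmw_dma_buffer to initialize. Zeroed by this
 *                 function.
 * @size:          Requested buffer size in bytes.
 * @placement:     TTM placement restricting where the buffer may reside.
 * @interruptible: Whether waits during initialization should be
 *                 performed while interruptible.
 * @bo_free:       Destroy callback; must be vmw_dmabuf_bo_free for
 *                 ordinary buffers or vmw_user_dmabuf_destroy for user
 *                 buffers.
 *
 * On failure ttm_bo_init() destroys the buffer object through @bo_free,
 * so the caller must not free @vmw_bo again in that case.
 *
 * A minimal usage sketch for an ordinary (kernel) buffer; the variables
 * are illustrative only:
 *
 *	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_vram_sys_placement,
 *			      true, &vmw_dmabuf_bo_free);
 */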
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free || (!user && bo_free != vmw_dmabuf_bo_free));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  (user) ? ttm_bo_type_device :
			  ttm_bo_type_kernel, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}

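/**
 * vmw_user_dmabuf_release - TTM base object release callback
 *
 * @p_base: Pointer to the base object pointer, which is set to NULL on
 *          return.
 *
 * Called when the last user-space reference on the base object is gone;
 * drops the buffer object reference held by the base object.
 */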
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

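/**
 * vmw_user_dmabuf_ref_obj_release - TTM ref object release callback
 *
 * @base:     Base object of the buffer whose ref object is released.
 * @ref_type: Type of the released ref object; only TTM_REF_SYNCCPU_WRITE
 *            is expected here.
 *
 * Releases an outstanding synccpu write grab, for example when the
 * owning object file is closed.
 */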
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->prime.base.tfile == tfile ||
		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		struct ttm_bo_device *bdev = bo->bdev;

		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, true,
				  !!(flags & drm_vmw_synccpu_dontblock));
		spin_unlock(&bdev->fence_lock);
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

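/**
 * vmw_user_dmabuf_lookup - Look up a dma buffer from a user-space handle
 *
 * @tfile:  Pointer to a struct ttm_object_file identifying the caller.
 * @handle: The user-space handle.
 * @out:    On successful return points to a refcounted
 *          struct vmw_dma_buffer.
 *
 * Returns -ESRCH if the handle can't be found, and -EINVAL if it
 * identifies an object of the wrong type.
 */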
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

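/**
 * vmw_user_dmabuf_reference - Add a TTM_REF_USAGE reference to a user
 * dma buffer and return its handle
 *
 * @tfile:   Pointer to a struct ttm_object_file identifying the caller.
 * @dma_buf: The buffer to reference.
 * @handle:  Assigned the user-space handle of the buffer.
 *
 * Returns -EINVAL if @dma_buf is not a user dma buffer.
 */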
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

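/**
 * vmw_stream_destroy - hw_destroy callback for stream resources
 *
 * @res: The stream resource being destroyed.
 *
 * Returns the stream id to the overlay unit.
 */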
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

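/**
 * vmw_stream_init - Initialize and activate a stream resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @stream:   The stream to initialize.
 * @res_free: Resource destructor, or NULL if plain kfree() should be used.
 *
 * On failure, @stream is freed through @res_free or kfree(), so the
 * caller must not free it again.
 */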
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

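/**
 * vmw_user_stream_free - Resource destructor for user streams
 *
 * @res: The resource embedded in the user stream to free.
 *
 * Frees the stream and returns its accounted size to the memory
 * accounting global.
 */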
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * vmw_user_stream_base_release - base object release for user streams
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

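/**
 * vmw_stream_unref_ioctl - ioctl function releasing a user-space stream
 * reference
 *
 * @dev:       Identifies the drm device.
 * @data:      Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Verifies that the stream id identifies a stream owned by the caller,
 * and if so drops the corresponding TTM_REF_USAGE reference.
 */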
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

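/**
 * vmw_stream_claim_ioctl - ioctl function claiming an overlay stream
 *
 * @dev:       Identifies the drm device.
 * @data:      Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Accounts for, allocates and initializes a user stream, registers it as
 * a base object and returns its resource id in the ioctl argument.
 */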
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

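/**
 * vmw_user_stream_lookup - Look up a stream resource owned by the caller
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile:    Pointer to a struct ttm_object_file identifying the caller.
 * @inout_id: The resource id of the stream on input; replaced with the
 *            device stream id on successful return.
 * @out:      Assigned a refcounted pointer to the stream resource.
 *
 * Returns -EINVAL if the id doesn't identify a stream, and -EPERM if the
 * stream isn't owned by the caller.
 */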
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @no_backup:      If true, don't allocate a backup buffer even for
 *                  resources that normally need one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * vmw_resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct vmw_resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_bo_device *bdev = bo->bdev;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		spin_lock(&bdev->fence_lock);
		(void) ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation
 * sequence, try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation
 * sequence, evict all evictable resources. In particular this means that
 * all guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}