/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the hw destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activating a resource means that vmw_resource_lookup() will be able
 * to find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
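
/*
 * Illustrative caller pattern for vmw_user_resource_lookup_handle()
 * (a sketch only, not part of the driver flow):
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use the surface resource ...
 *	vmw_resource_unreference(&res);
 *
 * The reference returned in @p_res must always be dropped with
 * vmw_resource_unreference() when the caller is done with it.
 */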

/**
 * vmw_user_lookup_handle - Helper that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
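
/*
 * Worked example of the accounting above (a sketch assuming 4 KiB pages and
 * a 64-bit build; the exact numbers depend on the configuration): a 64 KiB
 * request gives num_pages = 16, so the page array accounts for
 * ttm_round_pot(16 * sizeof(void *)) = 128 bytes, plus another 128 bytes for
 * the DMA address array when map_mode == vmw_dma_alloc_coherent, on top of
 * the fixed struct_size / user_struct_size overhead.
 */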

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->prime.base.tfile == tfile ||
		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		struct ttm_bo_device *bdev = bo->bdev;

		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, true,
				  !!(flags & drm_vmw_synccpu_dontblock));
		spin_unlock(&bdev->fence_lock);
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;


	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}


	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
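
/*
 * Illustrative caller pattern for vmw_user_stream_lookup() (a sketch only):
 * a caller such as the overlay ioctl resolves the user-visible stream id to
 * the device stream id roughly like this:
 *
 *	uint32_t id = arg->stream_id;
 *	struct vmw_resource *res;
 *
 *	ret = vmw_user_stream_lookup(dev_priv, tfile, &id, &res);
 *	if (ret)
 *		return ret;
 *	... program the overlay using the device id now in "id" ...
 *	vmw_resource_unreference(&res);
 */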


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
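	/*
	 * Worked example of the computation above (illustrative only):
	 * a 1024x768 request at 32 bpp yields pitch = 1024 * 4 = 4096 bytes
	 * and size = 4096 * 768 = 3145728 bytes (3 MiB).
	 */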

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
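
/*
 * Illustrative user-space usage (a sketch, not part of the driver): the
 * offset returned above is intended to be used as the offset argument of an
 * mmap(2) call on the opened DRM file descriptor, roughly:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, (off_t) offset);
 */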

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @no_backup:      If true, do not allocate a backup buffer as part of the
 *                  reservation.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
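
/*
 * Illustrative command-submission lifecycle (a sketch only; the real flow
 * lives in the execbuf code, which also reserves the backup buffers through
 * the ttm_eu helpers):
 *
 *	ret = vmw_resource_reserve(res, false);
 *	...
 *	ret = vmw_resource_validate(res);
 *	...	submit commands referencing res, fence the backup buffer,
 *		e.g. with vmw_fence_single_bo() ...
 *	vmw_resource_unreserve(res, NULL, 0);
 */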

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_bo_device *bdev = bo->bdev;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		spin_lock(&bdev->fence_lock);
		(void) ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}