1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_drv.h"
29 #include <drm/vmwgfx_drm.h>
30 #include <drm/ttm/ttm_object.h>
31 #include <drm/ttm/ttm_placement.h>
32 #include <drm/drmP.h>
33 #include "vmwgfx_resource_priv.h"
34 
35 #define VMW_RES_EVICT_ERR_COUNT 10
36 
37 struct vmw_user_dma_buffer {
38 	struct ttm_prime_object prime;
39 	struct vmw_dma_buffer dma;
40 };
41 
42 struct vmw_bo_user_rep {
43 	uint32_t handle;
44 	uint64_t map_handle;
45 };
46 
47 struct vmw_stream {
48 	struct vmw_resource res;
49 	uint32_t stream_id;
50 };
51 
52 struct vmw_user_stream {
53 	struct ttm_base_object base;
54 	struct vmw_stream stream;
55 };
56 
57 
58 static uint64_t vmw_user_stream_size;
59 
60 static const struct vmw_res_func vmw_stream_func = {
61 	.res_type = vmw_res_stream,
62 	.needs_backup = false,
63 	.may_evict = false,
64 	.type_name = "video streams",
65 	.backup_placement = NULL,
66 	.create = NULL,
67 	.destroy = NULL,
68 	.bind = NULL,
69 	.unbind = NULL
70 };
71 
72 static inline struct vmw_dma_buffer *
73 vmw_dma_buffer(struct ttm_buffer_object *bo)
74 {
75 	return container_of(bo, struct vmw_dma_buffer, base);
76 }
77 
78 static inline struct vmw_user_dma_buffer *
79 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
80 {
81 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
82 	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
83 }
84 
85 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
86 {
87 	kref_get(&res->kref);
88 	return res;
89 }
90 
91 
92 /**
93  * vmw_resource_release_id - release a resource id to the id manager.
94  *
95  * @res: Pointer to the resource.
96  *
 * Release the resource id to the resource id manager and set @res->id to -1.
98  */
99 void vmw_resource_release_id(struct vmw_resource *res)
100 {
101 	struct vmw_private *dev_priv = res->dev_priv;
102 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
103 
104 	write_lock(&dev_priv->resource_lock);
105 	if (res->id != -1)
106 		idr_remove(idr, res->id);
107 	res->id = -1;
108 	write_unlock(&dev_priv->resource_lock);
109 }
110 
111 static void vmw_resource_release(struct kref *kref)
112 {
113 	struct vmw_resource *res =
114 	    container_of(kref, struct vmw_resource, kref);
115 	struct vmw_private *dev_priv = res->dev_priv;
116 	int id;
117 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
118 
119 	res->avail = false;
120 	list_del_init(&res->lru_head);
121 	write_unlock(&dev_priv->resource_lock);
122 	if (res->backup) {
123 		struct ttm_buffer_object *bo = &res->backup->base;
124 
125 		ttm_bo_reserve(bo, false, false, false, 0);
126 		if (!list_empty(&res->mob_head) &&
127 		    res->func->unbind != NULL) {
128 			struct ttm_validate_buffer val_buf;
129 
130 			val_buf.bo = bo;
131 			res->func->unbind(res, false, &val_buf);
132 		}
133 		res->backup_dirty = false;
134 		list_del_init(&res->mob_head);
135 		ttm_bo_unreserve(bo);
136 		vmw_dmabuf_unreference(&res->backup);
137 	}
138 
139 	if (likely(res->hw_destroy != NULL))
140 		res->hw_destroy(res);
141 
142 	id = res->id;
143 	if (res->res_free != NULL)
144 		res->res_free(res);
145 	else
146 		kfree(res);
147 
148 	write_lock(&dev_priv->resource_lock);
149 
150 	if (id != -1)
151 		idr_remove(idr, id);
152 }
153 
154 void vmw_resource_unreference(struct vmw_resource **p_res)
155 {
156 	struct vmw_resource *res = *p_res;
157 	struct vmw_private *dev_priv = res->dev_priv;
158 
159 	*p_res = NULL;
160 	write_lock(&dev_priv->resource_lock);
161 	kref_put(&res->kref, vmw_resource_release);
162 	write_unlock(&dev_priv->resource_lock);
163 }
164 
165 
166 /**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
168  *
169  * @res: Pointer to the resource.
170  *
 * Allocate the lowest free resource id from the resource id manager, and set
172  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
173  */
174 int vmw_resource_alloc_id(struct vmw_resource *res)
175 {
176 	struct vmw_private *dev_priv = res->dev_priv;
177 	int ret;
178 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
179 
180 	BUG_ON(res->id != -1);
181 
182 	idr_preload(GFP_KERNEL);
183 	write_lock(&dev_priv->resource_lock);
184 
185 	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
186 	if (ret >= 0)
187 		res->id = ret;
188 
189 	write_unlock(&dev_priv->resource_lock);
190 	idr_preload_end();
191 	return ret < 0 ? ret : 0;
192 }
193 
194 /**
195  * vmw_resource_init - initialize a struct vmw_resource
196  *
197  * @dev_priv:       Pointer to a device private struct.
198  * @res:            The struct vmw_resource to initialize.
200  * @delay_id:       Boolean whether to defer device id allocation until
201  *                  the first validation.
202  * @res_free:       Resource destructor.
203  * @func:           Resource function table.
204  */
205 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
206 		      bool delay_id,
207 		      void (*res_free) (struct vmw_resource *res),
208 		      const struct vmw_res_func *func)
209 {
210 	kref_init(&res->kref);
211 	res->hw_destroy = NULL;
212 	res->res_free = res_free;
213 	res->avail = false;
214 	res->dev_priv = dev_priv;
215 	res->func = func;
216 	INIT_LIST_HEAD(&res->lru_head);
217 	INIT_LIST_HEAD(&res->mob_head);
218 	INIT_LIST_HEAD(&res->binding_head);
219 	res->id = -1;
220 	res->backup = NULL;
221 	res->backup_offset = 0;
222 	res->backup_dirty = false;
223 	res->res_dirty = false;
224 	if (delay_id)
225 		return 0;
226 	else
227 		return vmw_resource_alloc_id(res);
228 }
229 
230 /**
 * vmw_resource_activate - activate a resource
232  *
233  * @res:        Pointer to the newly created resource
234  * @hw_destroy: Destroy function. NULL if none.
235  *
236  * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that vmw_resource_lookup() will find the resource.
241  */
242 void vmw_resource_activate(struct vmw_resource *res,
243 			   void (*hw_destroy) (struct vmw_resource *))
244 {
245 	struct vmw_private *dev_priv = res->dev_priv;
246 
247 	write_lock(&dev_priv->resource_lock);
248 	res->avail = true;
249 	res->hw_destroy = hw_destroy;
250 	write_unlock(&dev_priv->resource_lock);
251 }
252 
253 struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
254 					 struct idr *idr, int id)
255 {
256 	struct vmw_resource *res;
257 
258 	read_lock(&dev_priv->resource_lock);
259 	res = idr_find(idr, id);
260 	if (res && res->avail)
261 		kref_get(&res->kref);
262 	else
263 		res = NULL;
264 	read_unlock(&dev_priv->resource_lock);
265 
266 	if (unlikely(res == NULL))
267 		return NULL;
268 
269 	return res;
270 }
271 
272 /**
273  * vmw_user_resource_lookup_handle - lookup a struct resource from a
274  * TTM user-space handle and perform basic type checks
275  *
276  * @dev_priv:     Pointer to a device private struct
277  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
278  * @handle:       The TTM user-space handle
279  * @converter:    Pointer to an object describing the resource type
280  * @p_res:        On successful return the location pointed to will contain
281  *                a pointer to a refcounted struct vmw_resource.
282  *
283  * If the handle can't be found or is associated with an incorrect resource
284  * type, -EINVAL will be returned.
285  */
286 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
287 				    struct ttm_object_file *tfile,
288 				    uint32_t handle,
289 				    const struct vmw_user_resource_conv
290 				    *converter,
291 				    struct vmw_resource **p_res)
292 {
293 	struct ttm_base_object *base;
294 	struct vmw_resource *res;
295 	int ret = -EINVAL;
296 
297 	base = ttm_base_object_lookup(tfile, handle);
298 	if (unlikely(base == NULL))
299 		return -EINVAL;
300 
301 	if (unlikely(ttm_base_object_type(base) != converter->object_type))
302 		goto out_bad_resource;
303 
304 	res = converter->base_obj_to_res(base);
305 
306 	read_lock(&dev_priv->resource_lock);
307 	if (!res->avail || res->res_free != converter->res_free) {
308 		read_unlock(&dev_priv->resource_lock);
309 		goto out_bad_resource;
310 	}
311 
312 	kref_get(&res->kref);
313 	read_unlock(&dev_priv->resource_lock);
314 
315 	*p_res = res;
316 	ret = 0;
317 
318 out_bad_resource:
319 	ttm_base_object_unref(&base);
320 
321 	return ret;
322 }
323 
324 /**
 * vmw_user_lookup_handle - look up either a surface or a dma buffer
 * from a user-space handle.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
328  */
329 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
330 			   struct ttm_object_file *tfile,
331 			   uint32_t handle,
332 			   struct vmw_surface **out_surf,
333 			   struct vmw_dma_buffer **out_buf)
334 {
335 	struct vmw_resource *res;
336 	int ret;
337 
338 	BUG_ON(*out_surf || *out_buf);
339 
340 	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
341 					      user_surface_converter,
342 					      &res);
343 	if (!ret) {
344 		*out_surf = vmw_res_to_srf(res);
345 		return 0;
346 	}
347 
348 	*out_surf = NULL;
349 	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
350 	return ret;
351 }
352 
353 /**
354  * Buffer management.
355  */
356 
357 /**
358  * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
359  *
360  * @dev_priv: Pointer to a struct vmw_private identifying the device.
361  * @size: The requested buffer size.
362  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
363  */
364 static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
365 				  bool user)
366 {
367 	static size_t struct_size, user_struct_size;
368 	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
369 	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
370 
371 	if (unlikely(struct_size == 0)) {
372 		size_t backend_size = ttm_round_pot(vmw_tt_size);
373 
374 		struct_size = backend_size +
375 			ttm_round_pot(sizeof(struct vmw_dma_buffer));
376 		user_struct_size = backend_size +
377 			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
378 	}
379 
380 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
381 		page_array_size +=
382 			ttm_round_pot(num_pages * sizeof(dma_addr_t));
383 
384 	return ((user) ? user_struct_size : struct_size) +
385 		page_array_size;
386 }
387 
388 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
389 {
390 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
391 
392 	kfree(vmw_bo);
393 }
394 
395 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
396 {
397 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
398 
399 	ttm_prime_object_kfree(vmw_user_bo, prime);
400 }
401 
402 int vmw_dmabuf_init(struct vmw_private *dev_priv,
403 		    struct vmw_dma_buffer *vmw_bo,
404 		    size_t size, struct ttm_placement *placement,
405 		    bool interruptible,
406 		    void (*bo_free) (struct ttm_buffer_object *bo))
407 {
408 	struct ttm_bo_device *bdev = &dev_priv->bdev;
409 	size_t acc_size;
410 	int ret;
411 	bool user = (bo_free == &vmw_user_dmabuf_destroy);
412 
413 	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
414 
415 	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
416 	memset(vmw_bo, 0, sizeof(*vmw_bo));
417 
418 	INIT_LIST_HEAD(&vmw_bo->res_list);
419 
420 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
421 			  (user) ? ttm_bo_type_device :
422 			  ttm_bo_type_kernel, placement,
423 			  0, interruptible,
424 			  NULL, acc_size, NULL, bo_free);
425 	return ret;
426 }
427 
428 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
429 {
430 	struct vmw_user_dma_buffer *vmw_user_bo;
431 	struct ttm_base_object *base = *p_base;
432 	struct ttm_buffer_object *bo;
433 
434 	*p_base = NULL;
435 
436 	if (unlikely(base == NULL))
437 		return;
438 
439 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
440 				   prime.base);
441 	bo = &vmw_user_bo->dma.base;
442 	ttm_bo_unref(&bo);
443 }
444 
445 static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
446 					    enum ttm_ref_type ref_type)
447 {
448 	struct vmw_user_dma_buffer *user_bo;
449 	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
450 
451 	switch (ref_type) {
452 	case TTM_REF_SYNCCPU_WRITE:
453 		ttm_bo_synccpu_write_release(&user_bo->dma.base);
454 		break;
455 	default:
456 		BUG();
457 	}
458 }
459 
460 /**
461  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
462  *
463  * @dev_priv: Pointer to a struct device private.
464  * @tfile: Pointer to a struct ttm_object_file on which to register the user
465  * object.
466  * @size: Size of the dma buffer.
467  * @shareable: Boolean whether the buffer is shareable with other open files.
468  * @handle: Pointer to where the handle value should be assigned.
469  * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
470  * should be assigned.
471  */
472 int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
473 			  struct ttm_object_file *tfile,
474 			  uint32_t size,
475 			  bool shareable,
476 			  uint32_t *handle,
477 			  struct vmw_dma_buffer **p_dma_buf)
478 {
479 	struct vmw_user_dma_buffer *user_bo;
480 	struct ttm_buffer_object *tmp;
481 	int ret;
482 
483 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
484 	if (unlikely(user_bo == NULL)) {
485 		DRM_ERROR("Failed to allocate a buffer.\n");
486 		return -ENOMEM;
487 	}
488 
489 	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
490 			      (dev_priv->has_mob) ?
491 			      &vmw_sys_placement :
492 			      &vmw_vram_sys_placement, true,
493 			      &vmw_user_dmabuf_destroy);
494 	if (unlikely(ret != 0))
495 		return ret;
496 
497 	tmp = ttm_bo_reference(&user_bo->dma.base);
498 	ret = ttm_prime_object_init(tfile,
499 				    size,
500 				    &user_bo->prime,
501 				    shareable,
502 				    ttm_buffer_type,
503 				    &vmw_user_dmabuf_release,
504 				    &vmw_user_dmabuf_ref_obj_release);
505 	if (unlikely(ret != 0)) {
506 		ttm_bo_unref(&tmp);
507 		goto out_no_base_object;
508 	}
509 
510 	*p_dma_buf = &user_bo->dma;
511 	*handle = user_bo->prime.base.hash.key;
512 
513 out_no_base_object:
514 	return ret;
515 }
516 
517 /**
518  * vmw_user_dmabuf_verify_access - verify access permissions on this
519  * buffer object.
520  *
521  * @bo: Pointer to the buffer object being accessed
522  * @tfile: Identifying the caller.
523  */
524 int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
525 				  struct ttm_object_file *tfile)
526 {
527 	struct vmw_user_dma_buffer *vmw_user_bo;
528 
529 	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
530 		return -EPERM;
531 
532 	vmw_user_bo = vmw_user_dma_buffer(bo);
533 	return (vmw_user_bo->prime.base.tfile == tfile ||
534 		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
535 }
536 
537 /**
538  * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
539  * access, idling previous GPU operations on the buffer and optionally
540  * blocking it for further command submissions.
541  *
542  * @user_bo: Pointer to the buffer object being grabbed for CPU access
543  * @tfile: Identifying the caller.
544  * @flags: Flags indicating how the grab should be performed.
545  *
546  * A blocking grab will be automatically released when @tfile is closed.
547  */
548 static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
549 					struct ttm_object_file *tfile,
550 					uint32_t flags)
551 {
552 	struct ttm_buffer_object *bo = &user_bo->dma.base;
553 	bool existed;
554 	int ret;
555 
556 	if (flags & drm_vmw_synccpu_allow_cs) {
557 		struct ttm_bo_device *bdev = bo->bdev;
558 
559 		spin_lock(&bdev->fence_lock);
560 		ret = ttm_bo_wait(bo, false, true,
561 				  !!(flags & drm_vmw_synccpu_dontblock));
562 		spin_unlock(&bdev->fence_lock);
563 		return ret;
564 	}
565 
566 	ret = ttm_bo_synccpu_write_grab
567 		(bo, !!(flags & drm_vmw_synccpu_dontblock));
568 	if (unlikely(ret != 0))
569 		return ret;
570 
571 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
572 				 TTM_REF_SYNCCPU_WRITE, &existed);
573 	if (ret != 0 || existed)
574 		ttm_bo_synccpu_write_release(&user_bo->dma.base);
575 
576 	return ret;
577 }
578 
579 /**
580  * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
581  * and unblock command submission on the buffer if blocked.
582  *
583  * @handle: Handle identifying the buffer object.
584  * @tfile: Identifying the caller.
585  * @flags: Flags indicating the type of release.
586  */
587 static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
588 					   struct ttm_object_file *tfile,
589 					   uint32_t flags)
590 {
591 	if (!(flags & drm_vmw_synccpu_allow_cs))
592 		return ttm_ref_object_base_unref(tfile, handle,
593 						 TTM_REF_SYNCCPU_WRITE);
594 
595 	return 0;
596 }
597 
598 /**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
600  * functionality.
601  *
602  * @dev: Identifies the drm device.
603  * @data: Pointer to the ioctl argument.
604  * @file_priv: Identifies the caller.
605  *
606  * This function checks the ioctl arguments for validity and calls the
607  * relevant synccpu functions.
608  */
609 int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
610 				  struct drm_file *file_priv)
611 {
612 	struct drm_vmw_synccpu_arg *arg =
613 		(struct drm_vmw_synccpu_arg *) data;
614 	struct vmw_dma_buffer *dma_buf;
615 	struct vmw_user_dma_buffer *user_bo;
616 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
617 	int ret;
618 
619 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
620 	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
621 			       drm_vmw_synccpu_dontblock |
622 			       drm_vmw_synccpu_allow_cs)) != 0) {
623 		DRM_ERROR("Illegal synccpu flags.\n");
624 		return -EINVAL;
625 	}
626 
627 	switch (arg->op) {
628 	case drm_vmw_synccpu_grab:
629 		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
630 		if (unlikely(ret != 0))
631 			return ret;
632 
633 		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
634 				       dma);
635 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
636 		vmw_dmabuf_unreference(&dma_buf);
637 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
638 			     ret != -EBUSY)) {
639 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
640 				  (unsigned int) arg->handle);
641 			return ret;
642 		}
643 		break;
644 	case drm_vmw_synccpu_release:
645 		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
646 						      arg->flags);
647 		if (unlikely(ret != 0)) {
648 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
649 				  (unsigned int) arg->handle);
650 			return ret;
651 		}
652 		break;
653 	default:
654 		DRM_ERROR("Invalid synccpu operation.\n");
655 		return -EINVAL;
656 	}
657 
658 	return 0;
659 }
660 
661 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
662 			   struct drm_file *file_priv)
663 {
664 	struct vmw_private *dev_priv = vmw_priv(dev);
665 	union drm_vmw_alloc_dmabuf_arg *arg =
666 	    (union drm_vmw_alloc_dmabuf_arg *)data;
667 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
668 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
669 	struct vmw_dma_buffer *dma_buf;
670 	uint32_t handle;
671 	struct vmw_master *vmaster = vmw_master(file_priv->master);
672 	int ret;
673 
674 	ret = ttm_read_lock(&vmaster->lock, true);
675 	if (unlikely(ret != 0))
676 		return ret;
677 
678 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
679 				    req->size, false, &handle, &dma_buf);
680 	if (unlikely(ret != 0))
681 		goto out_no_dmabuf;
682 
683 	rep->handle = handle;
684 	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
685 	rep->cur_gmr_id = handle;
686 	rep->cur_gmr_offset = 0;
687 
688 	vmw_dmabuf_unreference(&dma_buf);
689 
690 out_no_dmabuf:
691 	ttm_read_unlock(&vmaster->lock);
692 
693 	return ret;
694 }
695 
696 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
697 			   struct drm_file *file_priv)
698 {
699 	struct drm_vmw_unref_dmabuf_arg *arg =
700 	    (struct drm_vmw_unref_dmabuf_arg *)data;
701 
702 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
703 					 arg->handle,
704 					 TTM_REF_USAGE);
705 }
706 
707 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
708 			   uint32_t handle, struct vmw_dma_buffer **out)
709 {
710 	struct vmw_user_dma_buffer *vmw_user_bo;
711 	struct ttm_base_object *base;
712 
713 	base = ttm_base_object_lookup(tfile, handle);
714 	if (unlikely(base == NULL)) {
715 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
716 		       (unsigned long)handle);
717 		return -ESRCH;
718 	}
719 
720 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
721 		ttm_base_object_unref(&base);
722 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
723 		       (unsigned long)handle);
724 		return -EINVAL;
725 	}
726 
727 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
728 				   prime.base);
729 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
730 	ttm_base_object_unref(&base);
731 	*out = &vmw_user_bo->dma;
732 
733 	return 0;
734 }
735 
736 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
737 			      struct vmw_dma_buffer *dma_buf,
738 			      uint32_t *handle)
739 {
740 	struct vmw_user_dma_buffer *user_bo;
741 
742 	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
743 		return -EINVAL;
744 
745 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
746 
747 	*handle = user_bo->prime.base.hash.key;
748 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
749 				  TTM_REF_USAGE, NULL);
750 }
751 
752 /*
753  * Stream management
754  */
755 
756 static void vmw_stream_destroy(struct vmw_resource *res)
757 {
758 	struct vmw_private *dev_priv = res->dev_priv;
759 	struct vmw_stream *stream;
760 	int ret;
761 
762 	DRM_INFO("%s: unref\n", __func__);
763 	stream = container_of(res, struct vmw_stream, res);
764 
765 	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
766 	WARN_ON(ret != 0);
767 }
768 
769 static int vmw_stream_init(struct vmw_private *dev_priv,
770 			   struct vmw_stream *stream,
771 			   void (*res_free) (struct vmw_resource *res))
772 {
773 	struct vmw_resource *res = &stream->res;
774 	int ret;
775 
776 	ret = vmw_resource_init(dev_priv, res, false, res_free,
777 				&vmw_stream_func);
778 
779 	if (unlikely(ret != 0)) {
780 		if (res_free == NULL)
781 			kfree(stream);
782 		else
783 			res_free(&stream->res);
784 		return ret;
785 	}
786 
787 	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
788 	if (ret) {
789 		vmw_resource_unreference(&res);
790 		return ret;
791 	}
792 
793 	DRM_INFO("%s: claimed\n", __func__);
794 
795 	vmw_resource_activate(&stream->res, vmw_stream_destroy);
796 	return 0;
797 }
798 
799 static void vmw_user_stream_free(struct vmw_resource *res)
800 {
801 	struct vmw_user_stream *stream =
802 	    container_of(res, struct vmw_user_stream, stream.res);
803 	struct vmw_private *dev_priv = res->dev_priv;
804 
805 	ttm_base_object_kfree(stream, base);
806 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
807 			    vmw_user_stream_size);
808 }
809 
810 /**
 * vmw_user_stream_base_release - release a stream base object.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base object's reference on the resource object.
813  */
814 
815 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
816 {
817 	struct ttm_base_object *base = *p_base;
818 	struct vmw_user_stream *stream =
819 	    container_of(base, struct vmw_user_stream, base);
820 	struct vmw_resource *res = &stream->stream.res;
821 
822 	*p_base = NULL;
823 	vmw_resource_unreference(&res);
824 }
825 
826 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
827 			   struct drm_file *file_priv)
828 {
829 	struct vmw_private *dev_priv = vmw_priv(dev);
830 	struct vmw_resource *res;
831 	struct vmw_user_stream *stream;
832 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
833 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
834 	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
835 	int ret = 0;
836 
837 
838 	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
839 	if (unlikely(res == NULL))
840 		return -EINVAL;
841 
842 	if (res->res_free != &vmw_user_stream_free) {
843 		ret = -EINVAL;
844 		goto out;
845 	}
846 
847 	stream = container_of(res, struct vmw_user_stream, stream.res);
848 	if (stream->base.tfile != tfile) {
849 		ret = -EINVAL;
850 		goto out;
851 	}
852 
853 	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
854 out:
855 	vmw_resource_unreference(&res);
856 	return ret;
857 }
858 
859 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
860 			   struct drm_file *file_priv)
861 {
862 	struct vmw_private *dev_priv = vmw_priv(dev);
863 	struct vmw_user_stream *stream;
864 	struct vmw_resource *res;
865 	struct vmw_resource *tmp;
866 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
867 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
868 	struct vmw_master *vmaster = vmw_master(file_priv->master);
869 	int ret;
870 
871 	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
874 	 */
875 
876 	if (unlikely(vmw_user_stream_size == 0))
877 		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
878 
879 	ret = ttm_read_lock(&vmaster->lock, true);
880 	if (unlikely(ret != 0))
881 		return ret;
882 
883 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
884 				   vmw_user_stream_size,
885 				   false, true);
886 	if (unlikely(ret != 0)) {
887 		if (ret != -ERESTARTSYS)
888 			DRM_ERROR("Out of graphics memory for stream"
889 				  " creation.\n");
890 		goto out_unlock;
891 	}
892 
893 
894 	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
895 	if (unlikely(stream == NULL)) {
896 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
897 				    vmw_user_stream_size);
898 		ret = -ENOMEM;
899 		goto out_unlock;
900 	}
901 
902 	res = &stream->stream.res;
903 	stream->base.shareable = false;
904 	stream->base.tfile = NULL;
905 
906 	/*
907 	 * From here on, the destructor takes over resource freeing.
908 	 */
909 
910 	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
911 	if (unlikely(ret != 0))
912 		goto out_unlock;
913 
914 	tmp = vmw_resource_reference(res);
915 	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
916 				   &vmw_user_stream_base_release, NULL);
917 
918 	if (unlikely(ret != 0)) {
919 		vmw_resource_unreference(&tmp);
920 		goto out_err;
921 	}
922 
923 	arg->stream_id = res->id;
924 out_err:
925 	vmw_resource_unreference(&res);
926 out_unlock:
927 	ttm_read_unlock(&vmaster->lock);
928 	return ret;
929 }
930 
931 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
932 			   struct ttm_object_file *tfile,
933 			   uint32_t *inout_id, struct vmw_resource **out)
934 {
935 	struct vmw_user_stream *stream;
936 	struct vmw_resource *res;
937 	int ret;
938 
939 	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
940 				  *inout_id);
941 	if (unlikely(res == NULL))
942 		return -EINVAL;
943 
944 	if (res->res_free != &vmw_user_stream_free) {
945 		ret = -EINVAL;
946 		goto err_ref;
947 	}
948 
949 	stream = container_of(res, struct vmw_user_stream, stream.res);
950 	if (stream->base.tfile != tfile) {
951 		ret = -EPERM;
952 		goto err_ref;
953 	}
954 
955 	*inout_id = stream->stream.stream_id;
956 	*out = res;
957 	return 0;
958 err_ref:
959 	vmw_resource_unreference(&res);
960 	return ret;
961 }
962 
963 
964 /**
965  * vmw_dumb_create - Create a dumb kms buffer
966  *
967  * @file_priv: Pointer to a struct drm_file identifying the caller.
968  * @dev: Pointer to the drm device.
969  * @args: Pointer to a struct drm_mode_create_dumb structure
970  *
971  * This is a driver callback for the core drm create_dumb functionality.
972  * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
973  * that the arguments have a different format.
974  */
975 int vmw_dumb_create(struct drm_file *file_priv,
976 		    struct drm_device *dev,
977 		    struct drm_mode_create_dumb *args)
978 {
979 	struct vmw_private *dev_priv = vmw_priv(dev);
980 	struct vmw_master *vmaster = vmw_master(file_priv->master);
981 	struct vmw_dma_buffer *dma_buf;
982 	int ret;
983 
984 	args->pitch = args->width * ((args->bpp + 7) / 8);
985 	args->size = args->pitch * args->height;
986 
987 	ret = ttm_read_lock(&vmaster->lock, true);
988 	if (unlikely(ret != 0))
989 		return ret;
990 
991 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
992 				    args->size, false, &args->handle,
993 				    &dma_buf);
994 	if (unlikely(ret != 0))
995 		goto out_no_dmabuf;
996 
997 	vmw_dmabuf_unreference(&dma_buf);
998 out_no_dmabuf:
999 	ttm_read_unlock(&vmaster->lock);
1000 	return ret;
1001 }
1002 
1003 /**
1004  * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1005  *
1006  * @file_priv: Pointer to a struct drm_file identifying the caller.
1007  * @dev: Pointer to the drm device.
1008  * @handle: Handle identifying the dumb buffer.
1009  * @offset: The address space offset returned.
1010  *
1011  * This is a driver callback for the core drm dumb_map_offset functionality.
1012  */
1013 int vmw_dumb_map_offset(struct drm_file *file_priv,
1014 			struct drm_device *dev, uint32_t handle,
1015 			uint64_t *offset)
1016 {
1017 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1018 	struct vmw_dma_buffer *out_buf;
1019 	int ret;
1020 
1021 	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
1022 	if (ret != 0)
1023 		return -EINVAL;
1024 
1025 	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1026 	vmw_dmabuf_unreference(&out_buf);
1027 	return 0;
1028 }
1029 
1030 /**
 * vmw_dumb_destroy - Destroy a dumb buffer
1032  *
1033  * @file_priv: Pointer to a struct drm_file identifying the caller.
1034  * @dev: Pointer to the drm device.
1035  * @handle: Handle identifying the dumb buffer.
1036  *
1037  * This is a driver callback for the core drm dumb_destroy functionality.
1038  */
1039 int vmw_dumb_destroy(struct drm_file *file_priv,
1040 		     struct drm_device *dev,
1041 		     uint32_t handle)
1042 {
1043 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1044 					 handle, TTM_REF_USAGE);
1045 }
1046 
1047 /**
1048  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
1049  *
1050  * @res:            The resource for which to allocate a backup buffer.
1051  * @interruptible:  Whether any sleeps during allocation should be
1052  *                  performed while interruptible.
1053  */
1054 static int vmw_resource_buf_alloc(struct vmw_resource *res,
1055 				  bool interruptible)
1056 {
	unsigned long size = PAGE_ALIGN(res->backup_size);
1059 	struct vmw_dma_buffer *backup;
1060 	int ret;
1061 
1062 	if (likely(res->backup)) {
1063 		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
1064 		return 0;
1065 	}
1066 
1067 	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
1068 	if (unlikely(backup == NULL))
1069 		return -ENOMEM;
1070 
1071 	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
1072 			      res->func->backup_placement,
1073 			      interruptible,
1074 			      &vmw_dmabuf_bo_free);
1075 	if (unlikely(ret != 0))
1076 		goto out_no_dmabuf;
1077 
1078 	res->backup = backup;
1079 
1080 out_no_dmabuf:
1081 	return ret;
1082 }
1083 
1084 /**
1085  * vmw_resource_do_validate - Make a resource up-to-date and visible
1086  *                            to the device.
1087  *
1088  * @res:            The resource to make visible to the device.
1089  * @val_buf:        Information about a buffer possibly
1090  *                  containing backup data if a bind operation is needed.
1091  *
 * On hardware resource shortage, this function returns -EBUSY and
 * the call should be retried once resources have been freed up.
1094  */
1095 static int vmw_resource_do_validate(struct vmw_resource *res,
1096 				    struct ttm_validate_buffer *val_buf)
1097 {
1098 	int ret = 0;
1099 	const struct vmw_res_func *func = res->func;
1100 
1101 	if (unlikely(res->id == -1)) {
1102 		ret = func->create(res);
1103 		if (unlikely(ret != 0))
1104 			return ret;
1105 	}
1106 
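	/*
	 * Bind the backup buffer in @val_buf if this resource type has a
	 * bind operation and a buffer is supplied, and either the
	 * resource needs a backup that is not yet bound (empty mob_head)
	 * or it binds unconditionally.
	 */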
1107 	if (func->bind &&
1108 	    ((func->needs_backup && list_empty(&res->mob_head) &&
1109 	      val_buf->bo != NULL) ||
1110 	     (!func->needs_backup && val_buf->bo != NULL))) {
1111 		ret = func->bind(res, val_buf);
1112 		if (unlikely(ret != 0))
1113 			goto out_bind_failed;
1114 		if (func->needs_backup)
1115 			list_add_tail(&res->mob_head, &res->backup->res_list);
1116 	}
1117 
1118 	/*
1119 	 * Only do this on write operations, and move to
1120 	 * vmw_resource_unreserve if it can be called after
1121 	 * backup buffers have been unreserved. Otherwise
1122 	 * sort out locking.
1123 	 */
1124 	res->res_dirty = true;
1125 
1126 	return 0;
1127 
1128 out_bind_failed:
1129 	func->destroy(res);
1130 
1131 	return ret;
1132 }
1133 
1134 /**
1135  * vmw_resource_unreserve - Unreserve a resource previously reserved for
1136  * command submission.
1137  *
1138  * @res:               Pointer to the struct vmw_resource to unreserve.
1139  * @new_backup:        Pointer to new backup buffer if command submission
1140  *                     switched.
1141  * @new_backup_offset: New backup offset if @new_backup is !NULL.
1142  *
1143  * Currently unreserving a resource means putting it back on the device's
1144  * resource lru list, so that it can be evicted if necessary.
1145  */
1146 void vmw_resource_unreserve(struct vmw_resource *res,
1147 			    struct vmw_dma_buffer *new_backup,
1148 			    unsigned long new_backup_offset)
1149 {
1150 	struct vmw_private *dev_priv = res->dev_priv;
1151 
1152 	if (!list_empty(&res->lru_head))
1153 		return;
1154 
1155 	if (new_backup && new_backup != res->backup) {
1156 
1157 		if (res->backup) {
1158 			lockdep_assert_held(&res->backup->base.resv->lock.base);
1159 			list_del_init(&res->mob_head);
1160 			vmw_dmabuf_unreference(&res->backup);
1161 		}
1162 
1163 		res->backup = vmw_dmabuf_reference(new_backup);
1164 		lockdep_assert_held(&new_backup->base.resv->lock.base);
1165 		list_add_tail(&res->mob_head, &new_backup->res_list);
1166 	}
1167 	if (new_backup)
1168 		res->backup_offset = new_backup_offset;
1169 
1170 	if (!res->func->may_evict || res->id == -1)
1171 		return;
1172 
1173 	write_lock(&dev_priv->resource_lock);
1174 	list_add_tail(&res->lru_head,
1175 		      &res->dev_priv->res_lru[res->func->res_type]);
1176 	write_unlock(&dev_priv->resource_lock);
1177 }
1178 
1179 /**
1180  * vmw_resource_check_buffer - Check whether a backup buffer is needed
1181  *                             for a resource and in that case, allocate
1182  *                             one, reserve and validate it.
1183  *
1184  * @res:            The resource for which to allocate a backup buffer.
1185  * @interruptible:  Whether any sleeps during allocation should be
1186  *                  performed while interruptible.
1187  * @val_buf:        On successful return contains data about the
1188  *                  reserved and validated backup buffer.
1189  */
1190 static int
1191 vmw_resource_check_buffer(struct vmw_resource *res,
1192 			  bool interruptible,
1193 			  struct ttm_validate_buffer *val_buf)
1194 {
1195 	struct list_head val_list;
1196 	bool backup_dirty = false;
1197 	int ret;
1198 
1199 	if (unlikely(res->backup == NULL)) {
1200 		ret = vmw_resource_buf_alloc(res, interruptible);
1201 		if (unlikely(ret != 0))
1202 			return ret;
1203 	}
1204 
1205 	INIT_LIST_HEAD(&val_list);
1206 	val_buf->bo = ttm_bo_reference(&res->backup->base);
1207 	list_add_tail(&val_buf->head, &val_list);
1208 	ret = ttm_eu_reserve_buffers(NULL, &val_list);
1209 	if (unlikely(ret != 0))
1210 		goto out_no_reserve;
1211 
1212 	if (res->func->needs_backup && list_empty(&res->mob_head))
1213 		return 0;
1214 
1215 	backup_dirty = res->backup_dirty;
1216 	ret = ttm_bo_validate(&res->backup->base,
1217 			      res->func->backup_placement,
1218 			      true, false);
1219 
1220 	if (unlikely(ret != 0))
1221 		goto out_no_validate;
1222 
1223 	return 0;
1224 
1225 out_no_validate:
1226 	ttm_eu_backoff_reservation(NULL, &val_list);
1227 out_no_reserve:
1228 	ttm_bo_unref(&val_buf->bo);
1229 	if (backup_dirty)
1230 		vmw_dmabuf_unreference(&res->backup);
1231 
1232 	return ret;
1233 }
1234 
1235 /**
1236  * vmw_resource_reserve - Reserve a resource for command submission
1237  *
 * @res:            The resource to reserve.
 * @no_backup:      Whether to skip allocation of a backup buffer for
 *                  guest-backed resources.
 *
 * This function takes the resource off the LRU list and makes sure
1241  * a backup buffer is present for guest-backed resources. However,
1242  * the buffer may not be bound to the resource at this point.
1243  *
1244  */
1245 int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1246 {
1247 	struct vmw_private *dev_priv = res->dev_priv;
1248 	int ret;
1249 
1250 	write_lock(&dev_priv->resource_lock);
1251 	list_del_init(&res->lru_head);
1252 	write_unlock(&dev_priv->resource_lock);
1253 
1254 	if (res->func->needs_backup && res->backup == NULL &&
1255 	    !no_backup) {
1256 		ret = vmw_resource_buf_alloc(res, true);
1257 		if (unlikely(ret != 0))
1258 			return ret;
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 /**
1265  * vmw_resource_backoff_reservation - Unreserve and unreference a
1266  *                                    backup buffer
1267  *.
1268  * @val_buf:        Backup buffer information.
1269  */
1270 static void
1271 vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1272 {
1273 	struct list_head val_list;
1274 
1275 	if (likely(val_buf->bo == NULL))
1276 		return;
1277 
1278 	INIT_LIST_HEAD(&val_list);
1279 	list_add_tail(&val_buf->head, &val_list);
1280 	ttm_eu_backoff_reservation(NULL, &val_list);
1281 	ttm_bo_unref(&val_buf->bo);
1282 }
1283 
1284 /**
1285  * vmw_resource_do_evict - Evict a resource, and transfer its data
1286  *                         to a backup buffer.
1287  *
1288  * @res:            The resource to evict.
1289  * @interruptible:  Whether to wait interruptible.
1290  */
1291 int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1292 {
1293 	struct ttm_validate_buffer val_buf;
1294 	const struct vmw_res_func *func = res->func;
1295 	int ret;
1296 
1297 	BUG_ON(!func->may_evict);
1298 
1299 	val_buf.bo = NULL;
1300 	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1301 	if (unlikely(ret != 0))
1302 		return ret;
1303 
1304 	if (unlikely(func->unbind != NULL &&
1305 		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1306 		ret = func->unbind(res, res->res_dirty, &val_buf);
1307 		if (unlikely(ret != 0))
1308 			goto out_no_unbind;
1309 		list_del_init(&res->mob_head);
1310 	}
1311 	ret = func->destroy(res);
1312 	res->backup_dirty = true;
1313 	res->res_dirty = false;
1314 out_no_unbind:
1315 	vmw_resource_backoff_reservation(&val_buf);
1316 
1317 	return ret;
1318 }
1319 
1320 
1321 /**
1322  * vmw_resource_validate - Make a resource up-to-date and visible
1323  *                         to the device.
1324  *
1325  * @res:            The resource to make visible to the device.
1326  *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
1328  * be reserved and validated.
1329  * On hardware resource shortage, this function will repeatedly evict
1330  * resources of the same type until the validation succeeds.
1331  */
1332 int vmw_resource_validate(struct vmw_resource *res)
1333 {
1334 	int ret;
1335 	struct vmw_resource *evict_res;
1336 	struct vmw_private *dev_priv = res->dev_priv;
1337 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1338 	struct ttm_validate_buffer val_buf;
1339 	unsigned err_count = 0;
1340 
1341 	if (likely(!res->func->may_evict))
1342 		return 0;
1343 
1344 	val_buf.bo = NULL;
1345 	if (res->backup)
1346 		val_buf.bo = &res->backup->base;
1347 	do {
1348 		ret = vmw_resource_do_validate(res, &val_buf);
1349 		if (likely(ret != -EBUSY))
1350 			break;
1351 
1352 		write_lock(&dev_priv->resource_lock);
1353 		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
1355 				  "for %s.\n", res->func->type_name);
1356 			ret = -EBUSY;
1357 			write_unlock(&dev_priv->resource_lock);
1358 			break;
1359 		}
1360 
1361 		evict_res = vmw_resource_reference
1362 			(list_first_entry(lru_list, struct vmw_resource,
1363 					  lru_head));
1364 		list_del_init(&evict_res->lru_head);
1365 
1366 		write_unlock(&dev_priv->resource_lock);
1367 
1368 		ret = vmw_resource_do_evict(evict_res, true);
1369 		if (unlikely(ret != 0)) {
1370 			write_lock(&dev_priv->resource_lock);
1371 			list_add_tail(&evict_res->lru_head, lru_list);
1372 			write_unlock(&dev_priv->resource_lock);
1373 			if (ret == -ERESTARTSYS ||
1374 			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1375 				vmw_resource_unreference(&evict_res);
1376 				goto out_no_validate;
1377 			}
1378 		}
1379 
1380 		vmw_resource_unreference(&evict_res);
1381 	} while (1);
1382 
1383 	if (unlikely(ret != 0))
1384 		goto out_no_validate;
1385 	else if (!res->func->needs_backup && res->backup) {
1386 		list_del_init(&res->mob_head);
1387 		vmw_dmabuf_unreference(&res->backup);
1388 	}
1389 
1390 	return 0;
1391 
1392 out_no_validate:
1393 	return ret;
1394 }
1395 
1396 /**
1397  * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1398  *                       object without unreserving it.
1399  *
1400  * @bo:             Pointer to the struct ttm_buffer_object to fence.
1401  * @fence:          Pointer to the fence. If NULL, this function will
1402  *                  insert a fence into the command stream..
1403  *
1404  * Contrary to the ttm_eu version of this function, it takes only
1405  * a single buffer object instead of a list, and it also doesn't
1406  * unreserve the buffer object, which needs to be done separately.
1407  */
1408 void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1409 			 struct vmw_fence_obj *fence)
1410 {
1411 	struct ttm_bo_device *bdev = bo->bdev;
1412 	struct ttm_bo_driver *driver = bdev->driver;
1413 	struct vmw_fence_obj *old_fence_obj;
1414 	struct vmw_private *dev_priv =
1415 		container_of(bdev, struct vmw_private, bdev);
1416 
1417 	if (fence == NULL)
1418 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1419 	else
1420 		driver->sync_obj_ref(fence);
1421 
1422 	spin_lock(&bdev->fence_lock);
1423 
1424 	old_fence_obj = bo->sync_obj;
1425 	bo->sync_obj = fence;
1426 
1427 	spin_unlock(&bdev->fence_lock);
1428 
1429 	if (old_fence_obj)
1430 		vmw_fence_obj_unreference(&old_fence_obj);
1431 }
1432 
1433 /**
1434  * vmw_resource_move_notify - TTM move_notify_callback
1435  *
1436  * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
1438  *                  region the move is taking place.
1439  *
1440  * Evicts the Guest Backed hardware resource if the backup
1441  * buffer is being moved out of MOB memory.
1442  * Note that this function should not race with the resource
1443  * validation code as long as it accesses only members of struct
1444  * resource that remain static while bo::res is !NULL and
1445  * while we have @bo reserved. struct resource::backup is *not* a
1446  * static member. The resource validation code will take care
1447  * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
1449  * used to determine whether there is a need to unbind and whether
1450  * it is safe to unbind.
1451  */
1452 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1453 			      struct ttm_mem_reg *mem)
1454 {
1455 	struct vmw_dma_buffer *dma_buf;
1456 
1457 	if (mem == NULL)
1458 		return;
1459 
1460 	if (bo->destroy != vmw_dmabuf_bo_free &&
1461 	    bo->destroy != vmw_user_dmabuf_destroy)
1462 		return;
1463 
1464 	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1465 
1466 	if (mem->mem_type != VMW_PL_MOB) {
1467 		struct vmw_resource *res, *n;
1468 		struct ttm_bo_device *bdev = bo->bdev;
1469 		struct ttm_validate_buffer val_buf;
1470 
1471 		val_buf.bo = bo;
1472 
1473 		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1474 
1475 			if (unlikely(res->func->unbind == NULL))
1476 				continue;
1477 
1478 			(void) res->func->unbind(res, true, &val_buf);
1479 			res->backup_dirty = true;
1480 			res->res_dirty = false;
1481 			list_del_init(&res->mob_head);
1482 		}
1483 
1484 		spin_lock(&bdev->fence_lock);
1485 		(void) ttm_bo_wait(bo, false, false, false);
1486 		spin_unlock(&bdev->fence_lock);
1487 	}
1488 }
1489 
1490 /**
1491  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1492  *
1493  * @res:            The resource being queried.
1494  */
1495 bool vmw_resource_needs_backup(const struct vmw_resource *res)
1496 {
1497 	return res->func->needs_backup;
1498 }
1499 
1500 /**
1501  * vmw_resource_evict_type - Evict all resources of a specific type
1502  *
1503  * @dev_priv:       Pointer to a device private struct
1504  * @type:           The resource type to evict
1505  *
1506  * To avoid thrashing starvation or as part of the hibernation sequence,
1507  * try to evict all evictable resources of a specific type.
1508  */
1509 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1510 				    enum vmw_res_type type)
1511 {
1512 	struct list_head *lru_list = &dev_priv->res_lru[type];
1513 	struct vmw_resource *evict_res;
1514 	unsigned err_count = 0;
1515 	int ret;
1516 
1517 	do {
1518 		write_lock(&dev_priv->resource_lock);
1519 
1520 		if (list_empty(lru_list))
1521 			goto out_unlock;
1522 
1523 		evict_res = vmw_resource_reference(
1524 			list_first_entry(lru_list, struct vmw_resource,
1525 					 lru_head));
1526 		list_del_init(&evict_res->lru_head);
1527 		write_unlock(&dev_priv->resource_lock);
1528 
1529 		ret = vmw_resource_do_evict(evict_res, false);
1530 		if (unlikely(ret != 0)) {
1531 			write_lock(&dev_priv->resource_lock);
1532 			list_add_tail(&evict_res->lru_head, lru_list);
1533 			write_unlock(&dev_priv->resource_lock);
1534 			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1535 				vmw_resource_unreference(&evict_res);
1536 				return;
1537 			}
1538 		}
1539 
1540 		vmw_resource_unreference(&evict_res);
1541 	} while (1);
1542 
1543 out_unlock:
1544 	write_unlock(&dev_priv->resource_lock);
1545 }
1546 
1547 /**
1548  * vmw_resource_evict_all - Evict all evictable resources
1549  *
1550  * @dev_priv:       Pointer to a device private struct
1551  *
1552  * To avoid thrashing starvation or as part of the hibernation sequence,
1553  * evict all evictable resources. In particular this means that all
1554  * guest-backed resources that are registered with the device are
1555  * evicted and the OTable becomes clean.
1556  */
1557 void vmw_resource_evict_all(struct vmw_private *dev_priv)
1558 {
1559 	enum vmw_res_type type;
1560 
1561 	mutex_lock(&dev_priv->cmdbuf_mutex);
1562 
1563 	for (type = 0; type < vmw_res_max; ++type)
1564 		vmw_resource_evict_type(dev_priv, type);
1565 
1566 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1567 }
1568