xref: /openbmc/linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c (revision 941518d6)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 
29 #include <drm/ttm/ttm_placement.h>
30 
31 #include "vmwgfx_drv.h"
32 #include "ttm_object.h"
33 
34 
35 /**
36  * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
37  * vmw_buffer_object.
38  *
39  * @bo: Pointer to the TTM buffer object.
40  * Return: Pointer to the struct vmw_buffer_object embedding the
41  * TTM buffer object.
42  */
43 static struct vmw_buffer_object *
44 vmw_buffer_object(struct ttm_buffer_object *bo)
45 {
46 	return container_of(bo, struct vmw_buffer_object, base);
47 }
48 
49 
50 /**
51  * vmw_bo_pin_in_placement - Validate a buffer to the given placement and pin it.
52  *
53  * @dev_priv:  Driver private.
54  * @buf:  DMA buffer to move.
55  * @placement:  The placement to pin it.
56  * @interruptible:  Use interruptible wait.
57  * Return: Zero on success, negative error code on failure. In particular,
58  * -ERESTARTSYS if interrupted by a signal.
59  */
60 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
61 			    struct vmw_buffer_object *buf,
62 			    struct ttm_placement *placement,
63 			    bool interruptible)
64 {
65 	struct ttm_operation_ctx ctx = {interruptible, false };
66 	struct ttm_buffer_object *bo = &buf->base;
67 	int ret;
68 
69 	vmw_execbuf_release_pinned_bo(dev_priv);
70 
71 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
72 	if (unlikely(ret != 0))
73 		goto err;
74 
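	/*
	 * An already pinned buffer must not be moved; just check that its
	 * current resource is compatible with the requested placement.
	 */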
75 	if (buf->base.pin_count > 0)
76 		ret = ttm_resource_compat(bo->resource, placement)
77 			? 0 : -EINVAL;
78 	else
79 		ret = ttm_bo_validate(bo, placement, &ctx);
80 
81 	if (!ret)
82 		vmw_bo_pin_reserved(buf, true);
83 
84 	ttm_bo_unreserve(bo);
85 err:
86 	return ret;
87 }
88 
89 
90 /**
91  * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr and pin it.
92  *
93  * This function takes the reservation_sem in write mode.
94  * Flushes and unpins the query bo to avoid failures.
95  *
96  * @dev_priv:  Driver private.
97  * @buf:  DMA buffer to move.
98  * @interruptible:  Use interruptible wait.
99  * Return: Zero on success, negative error code on failure. In particular,
100  * -ERESTARTSYS if interrupted by a signal.
101  */
102 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
103 			      struct vmw_buffer_object *buf,
104 			      bool interruptible)
105 {
106 	struct ttm_operation_ctx ctx = {interruptible, false };
107 	struct ttm_buffer_object *bo = &buf->base;
108 	int ret;
109 
110 	vmw_execbuf_release_pinned_bo(dev_priv);
111 
112 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
113 	if (unlikely(ret != 0))
114 		goto err;
115 
116 	if (buf->base.pin_count > 0) {
117 		ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
118 			? 0 : -EINVAL;
119 		goto out_unreserve;
120 	}
121 
122 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
123 	if (likely(ret == 0) || ret == -ERESTARTSYS)
124 		goto out_unreserve;
125 
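	/*
	 * Validation in the combined VRAM + GMR placement failed with
	 * something other than a signal; fall back to VRAM only.
	 */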
126 	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
127 
128 out_unreserve:
129 	if (!ret)
130 		vmw_bo_pin_reserved(buf, true);
131 
132 	ttm_bo_unreserve(bo);
133 err:
134 	return ret;
135 }
136 
137 
138 /**
139  * vmw_bo_pin_in_vram - Move a buffer to vram and pin it.
140  *
141  * This function takes the reservation_sem in write mode.
142  * Flushes and unpins the query bo to avoid failures.
143  *
144  * @dev_priv:  Driver private.
145  * @buf:  DMA buffer to move.
146  * @interruptible:  Use interruptible wait.
147  * Return: Zero on success, negative error code on failure. In particular,
148  * -ERESTARTSYS if interrupted by a signal.
149  */
150 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
151 		       struct vmw_buffer_object *buf,
152 		       bool interruptible)
153 {
154 	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
155 				       interruptible);
156 }
157 
158 
159 /**
160  * vmw_bo_pin_in_start_of_vram - Move a buffer to the start of vram and pin it.
161  *
162  * This function takes the reservation_sem in write mode.
163  * Flushes and unpins the query bo to avoid failures.
164  *
165  * @dev_priv:  Driver private.
166  * @buf:  DMA buffer to pin.
167  * @interruptible:  Use interruptible wait.
168  * Return: Zero on success, negative error code on failure. In particular,
169  * -ERESTARTSYS if interrupted by a signal.
170  */
171 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
172 				struct vmw_buffer_object *buf,
173 				bool interruptible)
174 {
175 	struct ttm_operation_ctx ctx = {interruptible, false };
176 	struct ttm_buffer_object *bo = &buf->base;
177 	struct ttm_placement placement;
178 	struct ttm_place place;
179 	int ret = 0;
180 
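	/*
	 * Constrain the placement to the first num_pages pages of VRAM so a
	 * successful validation leaves the buffer starting at offset zero.
	 */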
181 	place = vmw_vram_placement.placement[0];
182 	place.lpfn = bo->resource->num_pages;
183 	placement.num_placement = 1;
184 	placement.placement = &place;
185 	placement.num_busy_placement = 1;
186 	placement.busy_placement = &place;
187 
188 	vmw_execbuf_release_pinned_bo(dev_priv);
189 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
190 	if (unlikely(ret != 0))
191 		goto err_unlock;
192 
193 	/*
194 	 * Is this buffer already in vram but not at the start of it?
195 	 * In that case, evict it first because TTM isn't good at handling
196 	 * that situation.
197 	 */
198 	if (bo->resource->mem_type == TTM_PL_VRAM &&
199 	    bo->resource->start < bo->resource->num_pages &&
200 	    bo->resource->start > 0 &&
201 	    buf->base.pin_count == 0) {
202 		ctx.interruptible = false;
203 		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
204 	}
205 
206 	if (buf->base.pin_count > 0)
207 		ret = ttm_resource_compat(bo->resource, &placement)
208 			? 0 : -EINVAL;
209 	else
210 		ret = ttm_bo_validate(bo, &placement, &ctx);
211 
212 	/* For some reason we didn't end up at the start of vram */
213 	WARN_ON(ret == 0 && bo->resource->start != 0);
214 	if (!ret)
215 		vmw_bo_pin_reserved(buf, true);
216 
217 	ttm_bo_unreserve(bo);
218 err_unlock:
219 
220 	return ret;
221 }
222 
223 
224 /**
225  * vmw_bo_unpin - Unpin the given buffer without moving it.
226  *
227  * This function takes the reservation_sem in write mode.
228  *
229  * @dev_priv:  Driver private.
230  * @buf:  DMA buffer to unpin.
231  * @interruptible:  Use interruptible wait.
232  * Return: Zero on success, negative error code on failure. In particular,
233  * -ERESTARTSYS if interrupted by a signal.
234  */
235 int vmw_bo_unpin(struct vmw_private *dev_priv,
236 		 struct vmw_buffer_object *buf,
237 		 bool interruptible)
238 {
239 	struct ttm_buffer_object *bo = &buf->base;
240 	int ret;
241 
242 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
243 	if (unlikely(ret != 0))
244 		goto err;
245 
246 	vmw_bo_pin_reserved(buf, false);
247 
248 	ttm_bo_unreserve(bo);
249 
250 err:
251 	return ret;
252 }
253 
254 /**
255  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
256  * of a buffer.
257  *
258  * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
259  * @ptr: SVGAGuestPtr returning the result.
260  */
261 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
262 			  SVGAGuestPtr *ptr)
263 {
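	/*
	 * VRAM is addressed as a byte offset into the special framebuffer
	 * GMR; other placements are addressed by their GMR id alone.
	 */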
264 	if (bo->resource->mem_type == TTM_PL_VRAM) {
265 		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
266 		ptr->offset = bo->resource->start << PAGE_SHIFT;
267 	} else {
268 		ptr->gmrId = bo->resource->start;
269 		ptr->offset = 0;
270 	}
271 }
272 
273 
274 /**
275  * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
276  *
277  * @vbo: The buffer object. Must be reserved.
278  * @pin: Whether to pin or unpin.
279  *
280  */
281 void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
282 {
283 	struct ttm_operation_ctx ctx = { false, true };
284 	struct ttm_place pl;
285 	struct ttm_placement placement;
286 	struct ttm_buffer_object *bo = &vbo->base;
287 	uint32_t old_mem_type = bo->resource->mem_type;
288 	int ret;
289 
290 	dma_resv_assert_held(bo->base.resv);
291 
292 	if (pin == !!bo->pin_count)
293 		return;
294 
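	/*
	 * Re-validate against a single placement that matches the buffer's
	 * current resource; the BUG_ON below checks that this did not move
	 * the buffer.
	 */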
295 	pl.fpfn = 0;
296 	pl.lpfn = 0;
297 	pl.mem_type = bo->resource->mem_type;
298 	pl.flags = bo->resource->placement;
299 
300 	memset(&placement, 0, sizeof(placement));
301 	placement.num_placement = 1;
302 	placement.placement = &pl;
303 
304 	ret = ttm_bo_validate(bo, &placement, &ctx);
305 
306 	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
307 
308 	if (pin)
309 		ttm_bo_pin(bo);
310 	else
311 		ttm_bo_unpin(bo);
312 }
313 
314 /**
315  * vmw_bo_map_and_cache - Map a buffer object and cache the map
316  *
317  * @vbo: The buffer object to map
318  * Return: A kernel virtual address or NULL if mapping failed.
319  *
320  * This function maps a buffer object into the kernel address space, or
321  * returns the virtual kernel address of an already existing map. The virtual
322  * address remains valid as long as the buffer object is pinned or reserved.
323  * The cached map is torn down on any of:
324  * 1) Buffer object move
325  * 2) Buffer object swapout
326  * 3) Buffer object destruction
327  *
328  */
329 void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
330 {
331 	struct ttm_buffer_object *bo = &vbo->base;
332 	bool not_used;
333 	void *virtual;
334 	int ret;
335 
336 	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
337 	if (virtual)
338 		return virtual;
339 
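	/* No cached map yet; map the whole buffer and cache it in vbo->map. */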
340 	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
341 	if (ret)
342 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
343 
344 	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
345 }
346 
347 
348 /**
349  * vmw_bo_unmap - Tear down a cached buffer object map.
350  *
351  * @vbo: The buffer object whose map we are tearing down.
352  *
353  * This function tears down a cached map set up using
354  * vmw_bo_map_and_cache().
355  */
356 void vmw_bo_unmap(struct vmw_buffer_object *vbo)
357 {
358 	if (vbo->map.bo == NULL)
359 		return;
360 
361 	ttm_bo_kunmap(&vbo->map);
362 }
363 
364 
365 /**
366  * vmw_bo_bo_free - vmw buffer object destructor
367  *
368  * @bo: Pointer to the embedded struct ttm_buffer_object
369  */
370 void vmw_bo_bo_free(struct ttm_buffer_object *bo)
371 {
372 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
373 
374 	WARN_ON(vmw_bo->dirty);
375 	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
376 	vmw_bo_unmap(vmw_bo);
377 	drm_gem_object_release(&bo->base);
378 	kfree(vmw_bo);
379 }
380 
381 /**
382  * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
383  *
384  * @dev_priv: Pointer to the device private struct
385  * @size: size of the BO we need
386  * @placement: where to put it
387  * @p_bo: resulting BO
388  *
389  * Creates and pins a simple BO for in-kernel use.
390  */
391 int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
392 			 struct ttm_placement *placement,
393 			 struct ttm_buffer_object **p_bo)
394 {
395 	struct ttm_operation_ctx ctx = {
396 		.interruptible = false,
397 		.no_wait_gpu = false
398 	};
399 	struct ttm_buffer_object *bo;
400 	struct drm_device *vdev = &dev_priv->drm;
401 	int ret;
402 
403 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
404 	if (unlikely(!bo))
405 		return -ENOMEM;
406 
407 	size = ALIGN(size, PAGE_SIZE);
408 
409 	drm_gem_private_object_init(vdev, &bo->base, size);
410 
411 	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
412 				   ttm_bo_type_kernel, placement, 0,
413 				   &ctx, NULL, NULL, NULL);
414 	if (unlikely(ret))
415 		goto error_free;
416 
417 	ttm_bo_pin(bo);
418 	ttm_bo_unreserve(bo);
419 	*p_bo = bo;
420 
421 	return 0;
422 
423 error_free:
424 	kfree(bo);
425 	return ret;
426 }
427 
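/**
 * vmw_bo_create - Allocate and initialize a vmw buffer object.
 *
 * @vmw: Pointer to the device private struct
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * @p_bo: Points to the newly allocated buffer object on success.
 * Return: Zero on success, negative error code on error.
 *
 * Allocates a struct vmw_buffer_object and initializes it with
 * vmw_bo_init(). On failure *@p_bo is set to NULL.
 */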
428 int vmw_bo_create(struct vmw_private *vmw,
429 		  size_t size, struct ttm_placement *placement,
430 		  bool interruptible, bool pin,
431 		  void (*bo_free)(struct ttm_buffer_object *bo),
432 		  struct vmw_buffer_object **p_bo)
433 {
434 	int ret;
435 
436 	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
437 	if (unlikely(!*p_bo)) {
438 		DRM_ERROR("Failed to allocate a buffer.\n");
439 		return -ENOMEM;
440 	}
441 
442 	ret = vmw_bo_init(vmw, *p_bo, size,
443 			  placement, interruptible, pin,
444 			  bo_free);
445 	if (unlikely(ret != 0))
446 		goto out_error;
447 
448 	return ret;
449 out_error:
450 	kfree(*p_bo);
451 	*p_bo = NULL;
452 	return ret;
453 }
454 
455 /**
456  * vmw_bo_init - Initialize a vmw buffer object
457  *
458  * @dev_priv: Pointer to the device private struct
459  * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
460  * @size: Buffer object size in bytes.
461  * @placement: Initial placement.
462  * @interruptible: Whether waits should be performed interruptibly.
463  * @pin: If the BO should be created pinned at a fixed location.
464  * @bo_free: The buffer object destructor.
465  * Return: Zero on success, negative error code on error.
466  *
467  * Note that on error, the code will free the buffer object.
468  */
469 int vmw_bo_init(struct vmw_private *dev_priv,
470 		struct vmw_buffer_object *vmw_bo,
471 		size_t size, struct ttm_placement *placement,
472 		bool interruptible, bool pin,
473 		void (*bo_free)(struct ttm_buffer_object *bo))
474 {
475 	struct ttm_operation_ctx ctx = {
476 		.interruptible = interruptible,
477 		.no_wait_gpu = false
478 	};
479 	struct ttm_device *bdev = &dev_priv->bdev;
480 	struct drm_device *vdev = &dev_priv->drm;
481 	int ret;
482 
483 	WARN_ON_ONCE(!bo_free);
484 	memset(vmw_bo, 0, sizeof(*vmw_bo));
485 	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
486 	vmw_bo->base.priority = 3;
487 	vmw_bo->res_tree = RB_ROOT;
488 
489 	size = ALIGN(size, PAGE_SIZE);
490 	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
491 
492 	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
493 				   ttm_bo_type_device,
494 				   placement,
495 				   0, &ctx, NULL, NULL, bo_free);
496 	if (unlikely(ret)) {
497 		return ret;
498 	}
499 
500 	if (pin)
501 		ttm_bo_pin(&vmw_bo->base);
502 	ttm_bo_unreserve(&vmw_bo->base);
503 
504 	return 0;
505 }
506 
507 /**
508  * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
509  * access, idling previous GPU operations on the buffer and optionally
510  * blocking it for further command submissions.
511  *
512  * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
513  * @flags: Flags indicating how the grab should be performed.
514  * Return: Zero on success, Negative error code on error. In particular,
515  * -EBUSY will be returned if a dontblock operation is requested and the
516  * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
517  * interrupted by a signal.
518  *
519  * A blocking grab will be automatically released when @tfile is closed.
520  */
521 static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
522 				    uint32_t flags)
523 {
524 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
525 	struct ttm_buffer_object *bo = &vmw_bo->base;
526 	int ret;
527 
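	/*
	 * With drm_vmw_synccpu_allow_cs, further command submission stays
	 * allowed: only wait for the buffer to become idle and leave the
	 * cpu_writers count untouched.
	 */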
528 	if (flags & drm_vmw_synccpu_allow_cs) {
529 		long lret;
530 
531 		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
532 					     nonblock ? 0 :
533 					     MAX_SCHEDULE_TIMEOUT);
534 		if (!lret)
535 			return -EBUSY;
536 		else if (lret < 0)
537 			return lret;
538 		return 0;
539 	}
540 
541 	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
542 	if (unlikely(ret != 0))
543 		return ret;
544 
545 	ret = ttm_bo_wait(bo, true, nonblock);
546 	if (likely(ret == 0))
547 		atomic_inc(&vmw_bo->cpu_writers);
548 
549 	ttm_bo_unreserve(bo);
552 
553 	return ret;
554 }
555 
556 /**
557  * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
558  * and unblock command submission on the buffer if blocked.
559  *
560  * @filp: Identifying the caller.
561  * @handle: Handle identifying the buffer object.
562  * @flags: Flags indicating the type of release.
563  */
564 static int vmw_user_bo_synccpu_release(struct drm_file *filp,
565 				       uint32_t handle,
566 				       uint32_t flags)
567 {
568 	struct vmw_buffer_object *vmw_bo;
569 	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
570 
571 	if (!ret) {
572 		if (!(flags & drm_vmw_synccpu_allow_cs)) {
573 			atomic_dec(&vmw_bo->cpu_writers);
574 		}
575 		ttm_bo_put(&vmw_bo->base);
576 	}
577 
578 	return ret;
579 }
580 
581 
582 /**
583  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
584  * functionality.
585  *
586  * @dev: Identifies the drm device.
587  * @data: Pointer to the ioctl argument.
588  * @file_priv: Identifies the caller.
589  * Return: Zero on success, negative error code on error.
590  *
591  * This function checks the ioctl arguments for validity and calls the
592  * relevant synccpu functions.
593  */
594 int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
595 			      struct drm_file *file_priv)
596 {
597 	struct drm_vmw_synccpu_arg *arg =
598 		(struct drm_vmw_synccpu_arg *) data;
599 	struct vmw_buffer_object *vbo;
600 	int ret;
601 
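	/* Require at least one of read/write and reject any unknown flags. */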
602 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
603 	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
604 			       drm_vmw_synccpu_dontblock |
605 			       drm_vmw_synccpu_allow_cs)) != 0) {
606 		DRM_ERROR("Illegal synccpu flags.\n");
607 		return -EINVAL;
608 	}
609 
610 	switch (arg->op) {
611 	case drm_vmw_synccpu_grab:
612 		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
613 		if (unlikely(ret != 0))
614 			return ret;
615 
616 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
617 		vmw_bo_unreference(&vbo);
618 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
619 			     ret != -EBUSY)) {
620 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
621 				  (unsigned int) arg->handle);
622 			return ret;
623 		}
624 		break;
625 	case drm_vmw_synccpu_release:
626 		ret = vmw_user_bo_synccpu_release(file_priv,
627 						  arg->handle,
628 						  arg->flags);
629 		if (unlikely(ret != 0)) {
630 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
631 				  (unsigned int) arg->handle);
632 			return ret;
633 		}
634 		break;
635 	default:
636 		DRM_ERROR("Invalid synccpu operation.\n");
637 		return -EINVAL;
638 	}
639 
640 	return 0;
641 }
642 
643 /**
644  * vmw_bo_unref_ioctl - Generic handle close ioctl.
645  *
646  * @dev: Identifies the drm device.
647  * @data: Pointer to the ioctl argument.
648  * @file_priv: Identifies the caller.
649  * Return: Zero on success, negative error code on error.
650  *
651  * This function checks the ioctl arguments for validity and closes a
652  * handle to a GEM buffer object, optionally freeing the object.
653  */
654 int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
655 		       struct drm_file *file_priv)
656 {
657 	struct drm_vmw_unref_dmabuf_arg *arg =
658 	    (struct drm_vmw_unref_dmabuf_arg *)data;
659 
660 	drm_gem_handle_delete(file_priv, arg->handle);
661 	return 0;
662 }
663 
664 
665 /**
666  * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
667  *
668  * @filp: The file the handle is registered with.
669  * @handle: The user buffer object handle
670  * @out: Pointer to where a pointer to the embedded
671  * struct vmw_buffer_object should be placed.
672  * Return: Zero on success, Negative error code on error.
673  *
674  * The vmw buffer object pointer will be refcounted.
675  */
676 int vmw_user_bo_lookup(struct drm_file *filp,
677 		       uint32_t handle,
678 		       struct vmw_buffer_object **out)
679 {
680 	struct drm_gem_object *gobj;
681 
682 	gobj = drm_gem_object_lookup(filp, handle);
683 	if (!gobj) {
684 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
685 			  (unsigned long)handle);
686 		return -ESRCH;
687 	}
688 
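	/*
	 * Hand the caller a TTM reference to the buffer object and drop the
	 * reference taken by the GEM handle lookup.
	 */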
689 	*out = gem_to_vmw_bo(gobj);
690 	ttm_bo_get(&(*out)->base);
691 	drm_gem_object_put(gobj);
692 
693 	return 0;
694 }
695 
696 /**
697  * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
698  * @filp: The DRM file the handle is registered with.
699  * @handle: The user buffer object handle.
700  *
701  * This function looks up the struct vmw_buffer_object behind a GEM handle
702  * and returns a pointer to it without refcounting the pointer.
703  * The returned pointer is only valid until vmw_user_bo_noref_release() is
704  * called, and the object pointed to by the returned pointer may be doomed.
705  * Any persistent usage of the object requires a refcount to be taken using
706  * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
707  * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
708  * scheduling functions may be called in between these function calls.
709  *
710  * Return: A struct vmw_buffer_object pointer if successful or negative
711  * error pointer on failure.
712  */
713 struct vmw_buffer_object *
714 vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
715 {
716 	struct vmw_buffer_object *vmw_bo;
717 	struct ttm_buffer_object *bo;
718 	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
719 
720 	if (!gobj) {
721 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
722 			  (unsigned long)handle);
723 		return ERR_PTR(-ESRCH);
724 	}
725 	vmw_bo = gem_to_vmw_bo(gobj);
726 	bo = ttm_bo_get_unless_zero(&vmw_bo->base);
727 	vmw_bo = vmw_buffer_object(bo);
728 	drm_gem_object_put(gobj);
729 
730 	return vmw_bo;
731 }
732 
733 
734 /**
735  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
736  *                       object without unreserving it.
737  *
738  * @bo:             Pointer to the struct ttm_buffer_object to fence.
739  * @fence:          Pointer to the fence. If NULL, this function will
740  *                  insert a fence into the command stream.
741  *
742  * Contrary to the ttm_eu version of this function, it takes only
743  * a single buffer object instead of a list, and it also doesn't
744  * unreserve the buffer object, which needs to be done separately.
745  */
746 void vmw_bo_fence_single(struct ttm_buffer_object *bo,
747 			 struct vmw_fence_obj *fence)
748 {
749 	struct ttm_device *bdev = bo->bdev;
750 
751 	struct vmw_private *dev_priv =
752 		container_of(bdev, struct vmw_private, bdev);
753 
754 	if (fence == NULL) {
755 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
756 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
757 		dma_fence_put(&fence->base);
758 	} else
759 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
760 }
761 
762 
763 /**
764  * vmw_dumb_create - Create a dumb kms buffer
765  *
766  * @file_priv: Pointer to a struct drm_file identifying the caller.
767  * @dev: Pointer to the drm device.
768  * @args: Pointer to a struct drm_mode_create_dumb structure
769  * Return: Zero on success, negative error code on failure.
770  *
771  * This is a driver callback for the core drm create_dumb functionality.
772  * Note that this is very similar to the vmw_bo_alloc ioctl, except
773  * that the arguments have a different format.
774  */
775 int vmw_dumb_create(struct drm_file *file_priv,
776 		    struct drm_device *dev,
777 		    struct drm_mode_create_dumb *args)
778 {
779 	struct vmw_private *dev_priv = vmw_priv(dev);
780 	struct vmw_buffer_object *vbo;
781 	int ret;
782 
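	/* Tightly packed pitch in bytes: bpp rounded up to whole bytes per pixel. */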
783 	args->pitch = args->width * ((args->bpp + 7) / 8);
784 	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
785 
786 	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
787 						args->size, &args->handle,
788 						&vbo);
789 
790 	return ret;
791 }
792 
793 /**
794  * vmw_bo_swap_notify - swapout notify callback.
795  *
796  * @bo: The buffer object to be swapped out.
797  */
798 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
799 {
800 	/* Nothing to do unless @bo is embedded in a struct vmw_buffer_object. */
801 	if (!vmw_bo_is_vmw_bo(bo))
802 		return;
803 
804 	/* Kill any cached kernel maps before swapout */
805 	vmw_bo_unmap(vmw_buffer_object(bo));
806 }
807 
808 
809 /**
810  * vmw_bo_move_notify - TTM move_notify_callback
811  *
812  * @bo: The TTM buffer object about to move.
813  * @mem: The struct ttm_resource indicating to what memory
814  *       region the move is taking place.
815  *
816  * Detaches cached maps and device bindings that require that the
817  * buffer doesn't move.
818  */
819 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
820 			struct ttm_resource *mem)
821 {
822 	struct vmw_buffer_object *vbo;
823 
824 	/* Nothing to do unless @bo is embedded in a struct vmw_buffer_object. */
825 	if (!vmw_bo_is_vmw_bo(bo))
826 		return;
827 
828 	vbo = container_of(bo, struct vmw_buffer_object, base);
829 
830 	/*
831 	 * Kill any cached kernel maps before move to or from VRAM.
832 	 * With other types of moves, the underlying pages stay the same,
833 	 * and the map can be kept.
834 	 */
835 	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
836 		vmw_bo_unmap(vbo);
837 
838 	/*
839 	 * If we're moving a backup MOB out of MOB placement, then make sure we
840 	 * read back all resource content first, and unbind the MOB from
841 	 * the resource.
842 	 */
843 	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
844 		vmw_resource_unbind_list(vbo);
845 }
846 
847 /**
848  * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
849  * @bo: buffer object to be checked
850  *
851  * Uses destroy function associated with the object to determine if this is
852  * a &vmw_buffer_object.
853  *
854  * Returns:
855  * true if the object is of &vmw_buffer_object type, false if not.
856  */
857 bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
858 {
859 	if (bo->destroy == &vmw_bo_bo_free ||
860 	    bo->destroy == &vmw_gem_destroy)
861 		return true;
862 
863 	return false;
864 }
865