// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

/**
 * vmw_bo_release - Release the resources held by a vmw buffer object
 *
 * @vbo: The buffer object
 *
 * Tears down any cached kernel map and releases the embedded GEM object.
 */
static void vmw_bo_release(struct vmw_bo *vbo)
{
	WARN_ON(vbo->tbo.base.funcs &&
		kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);
	drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_release(vbo);
	kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer against a placement and pin it.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it in.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/* Try GMR first, then fall back to VRAM. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}

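/*
 * Example (illustrative sketch only, not part of the driver): a caller
 * that needs the buffer resident in VRAM for the duration of an
 * operation would typically pair the pin with vmw_bo_unpin():
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	// ... issue work that relies on the VRAM placement ...
 *	vmw_bo_unpin(dev_priv, buf, false);
 */
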
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}

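/*
 * Example (illustrative sketch): the returned SVGAGuestPtr is typically
 * copied straight into a device command body. The command layout below
 * is hypothetical, meant only to show the calling pattern:
 *
 *	SVGAGuestPtr ptr;
 *
 *	vmw_bo_get_guest_ptr(&vbo->tbo, &ptr);
 *	cmd->body.ptr = ptr;
 */
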
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = true,
	};
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	/* Revalidate in place; the memory type must not change. */
	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

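/*
 * Example (illustrative sketch): vmw_bo_pin_reserved() requires the
 * caller to hold the reservation, so the usual pattern is:
 *
 *	ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);	// pin without moving
 *	ttm_bo_unreserve(&vbo->tbo);
 */
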
/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on any of
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

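/*
 * Example (illustrative sketch): mapping a pinned buffer and writing to
 * it; subsequent calls return the cached map until it is torn down:
 *
 *	u32 *virt = vmw_bo_map_and_cache(vbo);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	virt[0] = value;
 *	// The map stays cached; vmw_bo_unmap(), a move, a swapout or
 *	// destruction tears it down.
 */
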
/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
	vbo->map.bo = NULL;
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Return: Zero on success, negative error code on error.
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false,
		.resv = params->resv,
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx,
				   params->sg, params->resv, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}

int vmw_bo_create(struct vmw_private *vmw,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init() frees *p_bo if it fails, via the destroy callback
	 * passed to ttm_bo_init_reserved().
	 */
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}

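/*
 * Example (illustrative sketch): creating a page-sized, pinned,
 * system-memory buffer for kernel use. The fields shown are the ones
 * vmw_bo_init() consumes; dev_priv stands for the driver private:
 *
 *	struct vmw_bo_params params = {
 *		.domain = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type = ttm_bo_type_kernel,
 *		.size = PAGE_SIZE,
 *		.pin = true,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret = vmw_bo_create(dev_priv, &params, &vbo);
 */
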
/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file handle
 * it was performed through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_bo *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs))
			atomic_dec(&vmw_bo->cpu_writers);
		vmw_user_bo_unref(&vmw_bo);
	}

	return ret;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

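/*
 * Example (illustrative sketch of the userspace side, assuming libdrm's
 * drmCommandWrite() and the DRM_VMW_SYNCCPU command from vmwgfx_drm.h):
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.handle = bo_handle,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	// ... CPU access to the buffer contents ...
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */
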
/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a GEM buffer object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return drm_gem_handle_delete(file_priv, arg->handle);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the embedded struct vmw_bo
 * should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The returned vmw buffer object pointer is refcounted (both ttm and gem).
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = to_vmw_bo(gobj);

	return 0;
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}

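/*
 * Example (illustrative sketch): fencing a reserved buffer after
 * submitting work that uses it; unreserving is the caller's job:
 *
 *	ret = ttm_bo_reserve(bo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	// ... submit commands referencing bo ...
 *	vmw_bo_fence_single(bo, NULL);	// NULL inserts a new fence
 *	ttm_bo_unreserve(bo);
 */
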
/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_bo *vbo;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int ret;

	switch (cpp) {
	case 1: /* DRM_FORMAT_C8 */
	case 2: /* DRM_FORMAT_RGB565 */
	case 4: /* DRM_FORMAT_XRGB8888 */
		break;
	default:
		/*
		 * Dumb buffers don't allow anything else.
		 * This is tested via IGT's dumb_buffers
		 */
		return -EINVAL;
	}

	args->pitch = args->width * cpp;
	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
						args->size, &args->handle,
						&vbo);
	if (ret)
		return ret;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&vbo->tbo.base);
	return 0;
}

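/*
 * Worked example: an 800x600 DRM_FORMAT_XRGB8888 dumb buffer has
 * cpp = 4, so pitch = 800 * 4 = 3200 bytes, and with 4 KiB pages
 * size = ALIGN(3200 * 600, PAGE_SIZE) = ALIGN(1920000, 4096) = 1921024.
 */
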
/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}

static u32
set_placement_list(struct ttm_place *pl, u32 domain)
{
	u32 n = 0;

	/*
	 * The placements are ordered according to our preferences
	 */
	if (domain & VMW_BO_DOMAIN_MOB) {
		pl[n].mem_type = VMW_PL_MOB;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_GMR) {
		pl[n].mem_type = VMW_PL_GMR;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_VRAM) {
		pl[n].mem_type = TTM_PL_VRAM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_SYS) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}

	WARN_ON(!n);
	if (!n) {
		/* Fall back to system placement so the list is never empty. */
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	return n;
}

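/*
 * Example: for domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM the list
 * comes out as { VMW_PL_GMR, TTM_PL_VRAM }, i.e. GMR is preferred and
 * VRAM is the fallback, matching the order of the checks above.
 */
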
/**
 * vmw_bo_placement_set - Set the TTM placements of a buffer object
 *
 * @bo: The buffer object to set the placements for.
 * @domain: Bitmask of VMW_BO_DOMAIN_* preferred placements.
 * @busy_domain: Bitmask of VMW_BO_DOMAIN_* placements to fall back to
 * when the preferred ones are contended.
 */
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}

	pl->busy_placement = bo->busy_places;
	pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}

/**
 * vmw_bo_placement_set_default_accelerated - Set the default placements
 * usable by the device for accelerated operations: MOBs when the device
 * supports them, GMR or VRAM otherwise.
 *
 * @bo: The buffer object to set the placements for.
 */
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
		domain = VMW_BO_DOMAIN_MOB;

	vmw_bo_placement_set(bo, domain, domain);
}
841