// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate and pin a buffer at the given placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @placement:  The placement to pin it in.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
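
/*
 * Usage sketch (illustrative only, assuming the caller already holds
 * references to @dev_priv and @buf): pin a buffer in VRAM for the
 * duration of a CPU-driven update, then release the pin. Error handling
 * around the buffer use is elided.
 *
 *	int ret = vmw_bo_pin_in_placement(dev_priv, buf,
 *					  &vmw_vram_placement, true);
 *	if (ret)
 *		return ret;
 *	// ... touch the buffer while its backing store cannot move ...
 *	vmw_bo_unpin(dev_priv, buf, false);
 */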


/**
 * vmw_bo_pin_in_vram_or_gmr - Move and pin a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
			? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move and pin a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move and pin a buffer to the start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->resource->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < bo->resource->num_pages &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_resource_compat(bo->resource, &placement)
			? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* Warn if, for some reason, we didn't end up at the start of vram. */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}
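
/*
 * Worked example of the two encodings (illustrative only): a buffer at
 * page 16 of VRAM yields { .gmrId = SVGA_GMR_FRAMEBUFFER,
 * .offset = 16 << PAGE_SHIFT }, i.e. a byte offset into the framebuffer
 * aperture, while a GMR-backed buffer yields
 * { .gmrId = <gmr id>, .offset = 0 }.
 */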


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
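
/*
 * Usage sketch (illustrative only): map a buffer, write to it from the
 * CPU, and rely on the cached map for subsequent accesses. @vbo is
 * assumed to be pinned or reserved by the caller.
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *
 *	if (!virtual)
 *		return -ENOMEM;
 *	memset(virtual, 0, vbo->base.base.size);
 *	// No vmw_bo_unmap() needed here; the cached map is torn down
 *	// automatically on move, swapout or destruction.
 */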


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_acc_size - Calculate the memory accounted for a buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The size in bytes to charge to the TTM memory accounting for
 * this buffer object.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PFN_UP(size);
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
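
/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-bit
 * pointers): for a 1 MiB non-user buffer, num_pages = 256, so the page
 * array accounts for ttm_round_pot(256 * 8) = 2048 bytes; with
 * vmw_dma_alloc_coherent another ttm_round_pot(256 * sizeof(dma_addr_t))
 * = 2048 bytes is added, on top of the constant per-object struct_size.
 */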


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	dma_resv_fini(&bo->base._resv);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 * Return: Zero on success, negative error code on failure.
 *
 * Creates and pins a simple BO for in-kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	acc_size = ttm_round_pot(sizeof(*bo));
	acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
	acc_size += ttm_round_pot(sizeof(struct ttm_tt));

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		goto error_free;

	bo->base.size = size;
	dma_resv_init(&bo->base._resv);
	drm_vma_node_reset(&bo->base.vma_node);

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_device, placement, 0,
				   &ctx, NULL, NULL, NULL);
	if (unlikely(ret))
		goto error_account;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_account:
	ttm_mem_global_free(&ttm_mem_glob, acc_size);

error_free:
	kfree(bo);
	return ret;
}
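
/*
 * Usage sketch (illustrative only): create a page-sized, pinned,
 * kernel-internal BO in system memory and release it again. Error
 * handling around the use of the BO is elided.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE,
 *				   &vmw_sys_placement, &bo);
 *	if (ret)
 *		return ret;
 *	// ... use the pinned BO ...
 *	ttm_bo_unpin(bo);
 *	ttm_bo_put(bo);
 */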

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		return ret;

	vmw_bo->base.base.size = size;
	dma_resv_init(&vmw_bo->base.base._resv);
	drm_vma_node_reset(&vmw_bo->base.base.vma_node);

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device, placement,
				   0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret)) {
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		return ret;
	}

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);
	return 0;
}
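
/*
 * Usage sketch (illustrative only): initialize a kernel-owned, unpinned
 * buffer object with the default destructor. Per the note in the
 * kerneldoc above, on error the init path frees the buffer object, so
 * the caller must not free it again.
 *
 *	struct vmw_buffer_object *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *
 *	if (!vbo)
 *		return -ENOMEM;
 *	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
 *			  true, false, &vmw_bo_bo_free);
 */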


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a file-close cleanup.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a refcounted pointer to the TTM base object
 * should be assigned, or NULL if it is not needed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true, false,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
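
/*
 * Usage sketch (illustrative only): allocate a user-visible BO on behalf
 * of a caller identified by @tfile and drop the local reference once the
 * handle has been handed back to user-space (compare vmw_bo_alloc_ioctl()
 * below).
 *
 *	struct vmw_buffer_object *vbo;
 *	uint32_t handle;
 *	int ret;
 *
 *	ret = vmw_user_bo_alloc(dev_priv, tfile, size, false,
 *				&handle, &vbo, NULL);
 *	if (ret)
 *		return ret;
 *	// ... report @handle to user-space ...
 *	vmw_bo_unreference(&vbo);
 */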


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if access is granted, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, true, true,
					     nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
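
/*
 * Usage sketch (illustrative only, mirroring vmw_dumb_map_offset() below):
 * resolve a user handle to a refcounted buffer object pointer and drop
 * the reference when done. The base-object pointer is not needed here,
 * so NULL is passed for @p_base.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
 *
 *	if (ret)
 *		return ret;
 *	// ... use @vbo ...
 *	vmw_bo_unreference(&vbo);
 */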

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_buffer_object and returns a
 * pointer to the struct vmw_buffer_object it derives from without
 * refcounting the pointer. The returned pointer is only valid until
 * vmw_user_bo_noref_release() is called, and the object pointed to by the
 * returned pointer may be doomed. Any persistent usage of the object requires
 * a refcount to be taken using ttm_bo_reference_unless_doomed(). If and only
 * if this function returns successfully, it needs to be paired with
 * vmw_user_bo_noref_release(), and no sleeping or scheduling functions may be
 * called in between these calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}
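
/*
 * Usage sketch (illustrative only, assuming vmw_user_bo_noref_release()
 * is available to the caller as described above): a short, non-sleeping
 * lookup that is released again before anything can schedule.
 *
 *	struct vmw_buffer_object *vbo =
 *		vmw_user_bo_noref_lookup(tfile, handle);
 *
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *	// ... brief, non-sleeping use of @vbo ...
 *	vmw_user_bo_noref_release();
 */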

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else {
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
	}
}
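
/*
 * Usage sketch (illustrative only): fence a reserved buffer after
 * submitting GPU work that touches it, then unreserve it. Passing NULL
 * lets the helper insert and attach a new fence itself.
 *
 *	// @bo is reserved and commands using it have been submitted.
 *	vmw_bo_fence_single(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */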


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	return ret;
}


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}
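
/*
 * The returned value is a fake offset into the DRM device's mmap address
 * space. A user-space sketch (illustrative only, not kernel code) would
 * use it roughly as:
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 */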


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}
1207