xref: /openbmc/linux/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c (revision c2cd9d04)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 
29 #include <drm/ttm/ttm_placement.h>
30 
31 #include <drm/drmP.h>
32 #include "vmwgfx_drv.h"
33 #include "ttm_object.h"
34 
35 
36 /**
37  * struct vmw_user_buffer_object - User-space-visible buffer object
38  *
39  * @prime: The prime object providing user visibility.
40  * @vbo: The struct vmw_buffer_object
41  */
42 struct vmw_user_buffer_object {
43 	struct ttm_prime_object prime;
44 	struct vmw_buffer_object vbo;
45 };
46 
47 
48 /**
49  * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
50  * vmw_buffer_object.
51  *
52  * @bo: Pointer to the TTM buffer object.
53  * Return: Pointer to the struct vmw_buffer_object embedding the
54  * TTM buffer object.
55  */
56 static struct vmw_buffer_object *
57 vmw_buffer_object(struct ttm_buffer_object *bo)
58 {
59 	return container_of(bo, struct vmw_buffer_object, base);
60 }
61 
62 
63 /**
64  * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
65  * vmw_user_buffer_object.
66  *
67  * @bo: Pointer to the TTM buffer object.
68  * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
69  * buffer object.
70  */
71 static struct vmw_user_buffer_object *
72 vmw_user_buffer_object(struct ttm_buffer_object *bo)
73 {
74 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
75 
76 	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
77 }
78 
79 
80 /**
81  * vmw_bo_pin_in_placement - Validate a buffer to placement.
82  *
83  * @dev_priv:  Driver private.
84  * @buf:  DMA buffer to move.
85  * @placement:  The placement to pin it in.
86  * @interruptible:  Use interruptible wait.
87  * Return: Zero on success, Negative error code on failure. In particular
88  * -ERESTARTSYS if interrupted by a signal
89  */
90 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
91 			    struct vmw_buffer_object *buf,
92 			    struct ttm_placement *placement,
93 			    bool interruptible)
94 {
95 	struct ttm_operation_ctx ctx = { interruptible, false };
96 	struct ttm_buffer_object *bo = &buf->base;
97 	int ret;
98 	uint32_t new_flags;
99 
100 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
101 	if (unlikely(ret != 0))
102 		return ret;
103 
104 	vmw_execbuf_release_pinned_bo(dev_priv);
105 
106 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
107 	if (unlikely(ret != 0))
108 		goto err;
109 
110 	if (buf->pin_count > 0)
111 		ret = ttm_bo_mem_compat(placement, &bo->mem,
112 					&new_flags) == true ? 0 : -EINVAL;
113 	else
114 		ret = ttm_bo_validate(bo, placement, &ctx);
115 
116 	if (!ret)
117 		vmw_bo_pin_reserved(buf, true);
118 
119 	ttm_bo_unreserve(bo);
120 
121 err:
122 	ttm_write_unlock(&dev_priv->reservation_sem);
123 	return ret;
124 }
125 
126 
127 /**
128  * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
129  *
130  * This function takes the reservation_sem in write mode.
131  * Flushes and unpins the query bo to avoid failures.
132  *
133  * @dev_priv:  Driver private.
134  * @buf:  DMA buffer to move.
136  * @interruptible:  Use interruptible wait.
137  * Return: Zero on success, Negative error code on failure. In particular
138  * -ERESTARTSYS if interrupted by a signal
139  */
140 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
141 			      struct vmw_buffer_object *buf,
142 			      bool interruptible)
143 {
144 	struct ttm_operation_ctx ctx = { interruptible, false };
145 	struct ttm_buffer_object *bo = &buf->base;
146 	int ret;
147 	uint32_t new_flags;
148 
149 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
150 	if (unlikely(ret != 0))
151 		return ret;
152 
153 	vmw_execbuf_release_pinned_bo(dev_priv);
154 
155 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
156 	if (unlikely(ret != 0))
157 		goto err;
158 
159 	if (buf->pin_count > 0) {
160 		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
161 					&new_flags) == true ? 0 : -EINVAL;
162 		goto out_unreserve;
163 	}
164 
165 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
166 	if (likely(ret == 0) || ret == -ERESTARTSYS)
167 		goto out_unreserve;
168 
169 	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
170 
171 out_unreserve:
172 	if (!ret)
173 		vmw_bo_pin_reserved(buf, true);
174 
175 	ttm_bo_unreserve(bo);
176 err:
177 	ttm_write_unlock(&dev_priv->reservation_sem);
178 	return ret;
179 }
180 
181 
182 /**
183  * vmw_bo_pin_in_vram - Move a buffer to vram.
184  *
185  * This function takes the reservation_sem in write mode.
186  * Flushes and unpins the query bo to avoid failures.
187  *
188  * @dev_priv:  Driver private.
189  * @buf:  DMA buffer to move.
190  * @interruptible:  Use interruptible wait.
191  * Return: Zero on success, Negative error code on failure. In particular
192  * -ERESTARTSYS if interrupted by a signal
193  */
194 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
195 		       struct vmw_buffer_object *buf,
196 		       bool interruptible)
197 {
198 	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
199 				       interruptible);
200 }
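
/*
 * Example (editor's sketch, not part of the original driver): a typical
 * pin/unpin cycle around a period where the device needs the buffer at a
 * stable VRAM address. "dev_priv" and "buf" are assumed to exist in the
 * caller's context and error handling is abbreviated.
 *
 *	int ret;
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *
 *	... program the device with the buffer's VRAM offset ...
 *
 *	(void) vmw_bo_unpin(dev_priv, buf, false);
 */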
201 
202 
203 /**
204  * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
205  *
206  * This function takes the reservation_sem in write mode.
207  * Flushes and unpins the query bo to avoid failures.
208  *
209  * @dev_priv:  Driver private.
210  * @buf:  DMA buffer to pin.
211  * @interruptible:  Use interruptible wait.
212  * Return: Zero on success, Negative error code on failure. In particular
213  * -ERESTARTSYS if interrupted by a signal
214  */
215 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
216 				struct vmw_buffer_object *buf,
217 				bool interruptible)
218 {
219 	struct ttm_operation_ctx ctx = { interruptible, false };
220 	struct ttm_buffer_object *bo = &buf->base;
221 	struct ttm_placement placement;
222 	struct ttm_place place;
223 	int ret = 0;
224 	uint32_t new_flags;
225 
226 	place = vmw_vram_placement.placement[0];
227 	place.lpfn = bo->num_pages;
228 	placement.num_placement = 1;
229 	placement.placement = &place;
230 	placement.num_busy_placement = 1;
231 	placement.busy_placement = &place;
232 
233 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
234 	if (unlikely(ret != 0))
235 		return ret;
236 
237 	vmw_execbuf_release_pinned_bo(dev_priv);
238 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
239 	if (unlikely(ret != 0))
240 		goto err_unlock;
241 
242 	/*
243 	 * Is this buffer already in vram but not at the start of it?
244 	 * In that case, evict it first because TTM isn't good at handling
245 	 * that situation.
246 	 */
247 	if (bo->mem.mem_type == TTM_PL_VRAM &&
248 	    bo->mem.start < bo->num_pages &&
249 	    bo->mem.start > 0 &&
250 	    buf->pin_count == 0) {
251 		ctx.interruptible = false;
252 		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
253 	}
254 
255 	if (buf->pin_count > 0)
256 		ret = ttm_bo_mem_compat(&placement, &bo->mem,
257 					&new_flags) == true ? 0 : -EINVAL;
258 	else
259 		ret = ttm_bo_validate(bo, &placement, &ctx);
260 
261 	/* For some reason we didn't end up at the start of vram */
262 	WARN_ON(ret == 0 && bo->offset != 0);
263 	if (!ret)
264 		vmw_bo_pin_reserved(buf, true);
265 
266 	ttm_bo_unreserve(bo);
267 err_unlock:
268 	ttm_write_unlock(&dev_priv->reservation_sem);
269 
270 	return ret;
271 }
272 
273 
274 /**
275  * vmw_bo_unpin - Unpin the given buffer without moving it.
276  *
277  * This function takes the reservation_sem in read mode.
278  *
279  * @dev_priv:  Driver private.
280  * @buf:  DMA buffer to unpin.
281  * @interruptible:  Use interruptible wait.
282  * Return: Zero on success, Negative error code on failure. In particular
283  * -ERESTARTSYS if interrupted by a signal
284  */
285 int vmw_bo_unpin(struct vmw_private *dev_priv,
286 		 struct vmw_buffer_object *buf,
287 		 bool interruptible)
288 {
289 	struct ttm_buffer_object *bo = &buf->base;
290 	int ret;
291 
292 	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
293 	if (unlikely(ret != 0))
294 		return ret;
295 
296 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
297 	if (unlikely(ret != 0))
298 		goto err;
299 
300 	vmw_bo_pin_reserved(buf, false);
301 
302 	ttm_bo_unreserve(bo);
303 
304 err:
305 	ttm_read_unlock(&dev_priv->reservation_sem);
306 	return ret;
307 }
308 
309 /**
310  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
311  * of a buffer.
312  *
313  * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
314  * @ptr: SVGAGuestPtr returning the result.
315  */
316 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
317 			  SVGAGuestPtr *ptr)
318 {
319 	if (bo->mem.mem_type == TTM_PL_VRAM) {
320 		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
321 		ptr->offset = bo->offset;
322 	} else {
323 		ptr->gmrId = bo->mem.start;
324 		ptr->offset = 0;
325 	}
326 }
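
/*
 * Example (editor's sketch, not part of the original driver): translating a
 * pinned buffer's current placement into an SVGAGuestPtr, e.g. when filling
 * in a device command. "buf" is assumed to be a pinned or reserved
 * struct vmw_buffer_object from the caller's context.
 *
 *	SVGAGuestPtr ptr;
 *
 *	vmw_bo_get_guest_ptr(&buf->base, &ptr);
 *
 * For VRAM placements ptr.gmrId is SVGA_GMR_FRAMEBUFFER and ptr.offset is
 * the buffer's offset within VRAM; otherwise ptr.gmrId identifies the GMR
 * and ptr.offset is zero.
 */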
327 
328 
329 /**
330  * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
331  *
332  * @vbo: The buffer object. Must be reserved.
333  * @pin: Whether to pin or unpin.
334  *
335  */
336 void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
337 {
338 	struct ttm_operation_ctx ctx = { false, true };
339 	struct ttm_place pl;
340 	struct ttm_placement placement;
341 	struct ttm_buffer_object *bo = &vbo->base;
342 	uint32_t old_mem_type = bo->mem.mem_type;
343 	int ret;
344 
345 	lockdep_assert_held(&bo->resv->lock.base);
346 
347 	if (pin) {
348 		if (vbo->pin_count++ > 0)
349 			return;
350 	} else {
351 		WARN_ON(vbo->pin_count <= 0);
352 		if (--vbo->pin_count > 0)
353 			return;
354 	}
355 
356 	pl.fpfn = 0;
357 	pl.lpfn = 0;
358 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
359 		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
360 	if (pin)
361 		pl.flags |= TTM_PL_FLAG_NO_EVICT;
362 
363 	memset(&placement, 0, sizeof(placement));
364 	placement.num_placement = 1;
365 	placement.placement = &pl;
366 
367 	ret = ttm_bo_validate(bo, &placement, &ctx);
368 
369 	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
370 }
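
/*
 * Example (editor's sketch, not part of the original driver):
 * vmw_bo_pin_reserved() must be called with the buffer reserved, so a
 * minimal pin sequence looks roughly as follows. "vbo" is assumed to come
 * from the caller's context and error handling is abbreviated.
 *
 *	struct ttm_buffer_object *bo = &vbo->base;
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	ttm_bo_unreserve(bo);
 *
 * The matching unpin uses the same pattern with vmw_bo_pin_reserved(vbo,
 * false) once the pin is no longer needed.
 */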
371 
372 
373 /**
374  * vmw_bo_map_and_cache - Map a buffer object and cache the map
375  *
376  * @vbo: The buffer object to map
377  * Return: A kernel virtual address or NULL if mapping failed.
378  *
379  * This function maps a buffer object into the kernel address space, or
380  * returns the virtual kernel address of an already existing map. The virtual
381  * address remains valid as long as the buffer object is pinned or reserved.
382  * The cached map is torn down on any of:
383  * 1) Buffer object move
384  * 2) Buffer object swapout
385  * 3) Buffer object destruction
386  *
387  */
388 void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
389 {
390 	struct ttm_buffer_object *bo = &vbo->base;
391 	bool not_used;
392 	void *virtual;
393 	int ret;
394 
395 	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
396 	if (virtual)
397 		return virtual;
398 
399 	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
400 	if (ret)
401 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
402 
403 	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
404 }
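
/*
 * Example (editor's sketch, not part of the original driver): CPU access to
 * a pinned buffer through the cached map. "vbo", "src" and "size" are
 * assumed to come from the caller's context. Note that there is usually no
 * matching unmap call; the cached map is torn down by the move, swapout and
 * destruction paths below.
 *
 *	void *virtual;
 *
 *	virtual = vmw_bo_map_and_cache(vbo);
 *	if (!virtual)
 *		return -ENOMEM;
 *	memcpy(virtual, src, size);
 */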
405 
406 
407 /**
408  * vmw_bo_unmap - Tear down a cached buffer object map.
409  *
410  * @vbo: The buffer object whose map we are tearing down.
411  *
412  * This function tears down a cached map set up using
413  * vmw_bo_map_and_cache().
414  */
415 void vmw_bo_unmap(struct vmw_buffer_object *vbo)
416 {
417 	if (vbo->map.bo == NULL)
418 		return;
419 
420 	ttm_bo_kunmap(&vbo->map);
421 }
422 
423 
424 /**
425  * vmw_bo_acc_size - Calculate the pinned kernel memory usage of a buffer object
426  *
427  * @dev_priv: Pointer to a struct vmw_private identifying the device.
428  * @size: The requested buffer size.
429  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
430  */
431 static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
432 			      bool user)
433 {
434 	static size_t struct_size, user_struct_size;
435 	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
436 	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
437 
438 	if (unlikely(struct_size == 0)) {
439 		size_t backend_size = ttm_round_pot(vmw_tt_size);
440 
441 		struct_size = backend_size +
442 			ttm_round_pot(sizeof(struct vmw_buffer_object));
443 		user_struct_size = backend_size +
444 		  ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
445 				      TTM_OBJ_EXTRA_SIZE;
446 	}
447 
448 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
449 		page_array_size +=
450 			ttm_round_pot(num_pages * sizeof(dma_addr_t));
451 
452 	return ((user) ? user_struct_size : struct_size) +
453 		page_array_size;
454 }
455 
456 
457 /**
458  * vmw_bo_bo_free - vmw buffer object destructor
459  *
460  * @bo: Pointer to the embedded struct ttm_buffer_object
461  */
462 void vmw_bo_bo_free(struct ttm_buffer_object *bo)
463 {
464 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
465 
466 	WARN_ON(vmw_bo->dirty);
467 	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
468 	vmw_bo_unmap(vmw_bo);
469 	kfree(vmw_bo);
470 }
471 
472 
473 /**
474  * vmw_user_bo_destroy - vmw user buffer object destructor
475  *
476  * @bo: Pointer to the embedded struct ttm_buffer_object
477  */
478 static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
479 {
480 	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
481 	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
482 
483 	WARN_ON(vbo->dirty);
484 	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
485 	vmw_bo_unmap(vbo);
486 	ttm_prime_object_kfree(vmw_user_bo, prime);
487 }
488 
489 
490 /**
491  * vmw_bo_init - Initialize a vmw buffer object
492  *
493  * @dev_priv: Pointer to the device private struct
494  * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
495  * @size: Buffer object size in bytes.
496  * @placement: Initial placement.
497  * @interruptible: Whether waits should be performed interruptible.
498  * @bo_free: The buffer object destructor.
499  * Return: Zero on success, negative error code on error.
500  *
501  * Note that on error, the code will free the buffer object.
502  */
503 int vmw_bo_init(struct vmw_private *dev_priv,
504 		struct vmw_buffer_object *vmw_bo,
505 		size_t size, struct ttm_placement *placement,
506 		bool interruptible,
507 		void (*bo_free)(struct ttm_buffer_object *bo))
508 {
509 	struct ttm_bo_device *bdev = &dev_priv->bdev;
510 	size_t acc_size;
511 	int ret;
512 	bool user = (bo_free == &vmw_user_bo_destroy);
513 
514 	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
515 
516 	acc_size = vmw_bo_acc_size(dev_priv, size, user);
517 	memset(vmw_bo, 0, sizeof(*vmw_bo));
518 	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
519 	vmw_bo->base.priority = 3;
520 	vmw_bo->res_tree = RB_ROOT;
521 
522 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
523 			  ttm_bo_type_device, placement,
524 			  0, interruptible, acc_size,
525 			  NULL, NULL, bo_free);
526 	return ret;
527 }
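
/*
 * Example (editor's sketch, not part of the original driver): creating a
 * kernel-internal buffer object with the plain vmw_bo_bo_free() destructor.
 * "dev_priv" and "size" are assumed to come from the caller's context. Note
 * that vmw_bo_init() frees the object on failure, so the caller must not
 * free it again.
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	if (!vbo)
 *		return -ENOMEM;
 *
 *	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
 *			  true, vmw_bo_bo_free);
 *	if (ret)
 *		return ret;
 */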
528 
529 
530 /**
531  * vmw_user_bo_release - TTM reference base object release callback for
532  * vmw user buffer objects
533  *
534  * @p_base: The TTM base object pointer about to be unreferenced.
535  *
536  * Clears the TTM base object pointer and drops the reference the
537  * base object has on the underlying struct vmw_buffer_object.
538  */
539 static void vmw_user_bo_release(struct ttm_base_object **p_base)
540 {
541 	struct vmw_user_buffer_object *vmw_user_bo;
542 	struct ttm_base_object *base = *p_base;
543 
544 	*p_base = NULL;
545 
546 	if (unlikely(base == NULL))
547 		return;
548 
549 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
550 				   prime.base);
551 	ttm_bo_put(&vmw_user_bo->vbo.base);
552 }
553 
554 
555 /**
556  * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
557  * for vmw user buffer objects
558  *
559  * @base: Pointer to the TTM base object
560  * @ref_type: Reference type of the reference reaching zero.
561  *
562  * Called when user-space drops its last synccpu reference on the buffer
563  * object, either explicitly or as part of cleanup when the file is closed.
564  */
565 static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
566 					enum ttm_ref_type ref_type)
567 {
568 	struct vmw_user_buffer_object *user_bo;
569 
570 	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
571 
572 	switch (ref_type) {
573 	case TTM_REF_SYNCCPU_WRITE:
574 		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
575 		break;
576 	default:
577 		WARN_ONCE(true, "Undefined buffer object reference release.\n");
578 	}
579 }
580 
581 
582 /**
583  * vmw_user_bo_alloc - Allocate a user buffer object
584  *
585  * @dev_priv: Pointer to a struct device private.
586  * @tfile: Pointer to a struct ttm_object_file on which to register the user
587  * object.
588  * @size: Size of the buffer object.
589  * @shareable: Boolean whether the buffer is shareable with other open files.
590  * @handle: Pointer to where the handle value should be assigned.
591  * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
592  * should be assigned.
 * @p_base: Pointer to where a refcounted pointer to the TTM base object
 * should be assigned, or NULL if no such pointer is needed.
593  * Return: Zero on success, negative error code on error.
594  */
595 int vmw_user_bo_alloc(struct vmw_private *dev_priv,
596 		      struct ttm_object_file *tfile,
597 		      uint32_t size,
598 		      bool shareable,
599 		      uint32_t *handle,
600 		      struct vmw_buffer_object **p_vbo,
601 		      struct ttm_base_object **p_base)
602 {
603 	struct vmw_user_buffer_object *user_bo;
604 	int ret;
605 
606 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
607 	if (unlikely(!user_bo)) {
608 		DRM_ERROR("Failed to allocate a buffer.\n");
609 		return -ENOMEM;
610 	}
611 
612 	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
613 			  (dev_priv->has_mob) ?
614 			  &vmw_sys_placement :
615 			  &vmw_vram_sys_placement, true,
616 			  &vmw_user_bo_destroy);
617 	if (unlikely(ret != 0))
618 		return ret;
619 
620 	ttm_bo_get(&user_bo->vbo.base);
621 	ret = ttm_prime_object_init(tfile,
622 				    size,
623 				    &user_bo->prime,
624 				    shareable,
625 				    ttm_buffer_type,
626 				    &vmw_user_bo_release,
627 				    &vmw_user_bo_ref_obj_release);
628 	if (unlikely(ret != 0)) {
629 		ttm_bo_put(&user_bo->vbo.base);
630 		goto out_no_base_object;
631 	}
632 
633 	*p_vbo = &user_bo->vbo;
634 	if (p_base) {
635 		*p_base = &user_bo->prime.base;
636 		kref_get(&(*p_base)->refcount);
637 	}
638 	*handle = user_bo->prime.base.handle;
639 
640 out_no_base_object:
641 	return ret;
642 }
643 
644 
645 /**
646  * vmw_user_bo_verify_access - verify access permissions on this
647  * buffer object.
648  *
649  * @bo: Pointer to the buffer object being accessed
650  * @tfile: Identifying the caller.
651  */
652 int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
653 			      struct ttm_object_file *tfile)
654 {
655 	struct vmw_user_buffer_object *vmw_user_bo;
656 
657 	if (unlikely(bo->destroy != vmw_user_bo_destroy))
658 		return -EPERM;
659 
660 	vmw_user_bo = vmw_user_buffer_object(bo);
661 
662 	/* Check that the caller has opened the object. */
663 	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
664 		return 0;
665 
666 	DRM_ERROR("Could not grant buffer access.\n");
667 	return -EPERM;
668 }
669 
670 
671 /**
672  * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
673  * access, idling previous GPU operations on the buffer and optionally
674  * blocking it for further command submissions.
675  *
676  * @user_bo: Pointer to the buffer object being grabbed for CPU access
677  * @tfile: Identifying the caller.
678  * @flags: Flags indicating how the grab should be performed.
679  * Return: Zero on success, Negative error code on error. In particular,
680  * -EBUSY will be returned if a dontblock operation is requested and the
681  * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
682  * interrupted by a signal.
683  *
684  * A blocking grab will be automatically released when @tfile is closed.
685  */
686 static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
687 				    struct ttm_object_file *tfile,
688 				    uint32_t flags)
689 {
690 	struct ttm_buffer_object *bo = &user_bo->vbo.base;
691 	bool existed;
692 	int ret;
693 
694 	if (flags & drm_vmw_synccpu_allow_cs) {
695 		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
696 		long lret;
697 
698 		lret = reservation_object_wait_timeout_rcu
699 			(bo->resv, true, true,
700 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
701 		if (!lret)
702 			return -EBUSY;
703 		else if (lret < 0)
704 			return lret;
705 		return 0;
706 	}
707 
708 	ret = ttm_bo_synccpu_write_grab
709 		(bo, !!(flags & drm_vmw_synccpu_dontblock));
710 	if (unlikely(ret != 0))
711 		return ret;
712 
713 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
714 				 TTM_REF_SYNCCPU_WRITE, &existed, false);
715 	if (ret != 0 || existed)
716 		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
717 
718 	return ret;
719 }
720 
721 /**
722  * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
723  * and unblock command submission on the buffer if blocked.
724  *
725  * @handle: Handle identifying the buffer object.
726  * @tfile: Identifying the caller.
727  * @flags: Flags indicating the type of release.
728  */
729 static int vmw_user_bo_synccpu_release(uint32_t handle,
730 					   struct ttm_object_file *tfile,
731 					   uint32_t flags)
732 {
733 	if (!(flags & drm_vmw_synccpu_allow_cs))
734 		return ttm_ref_object_base_unref(tfile, handle,
735 						 TTM_REF_SYNCCPU_WRITE);
736 
737 	return 0;
738 }
739 
740 
741 /**
742  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
743  * functionality.
744  *
745  * @dev: Identifies the drm device.
746  * @data: Pointer to the ioctl argument.
747  * @file_priv: Identifies the caller.
748  * Return: Zero on success, negative error code on error.
749  *
750  * This function checks the ioctl arguments for validity and calls the
751  * relevant synccpu functions.
752  */
753 int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
754 			      struct drm_file *file_priv)
755 {
756 	struct drm_vmw_synccpu_arg *arg =
757 		(struct drm_vmw_synccpu_arg *) data;
758 	struct vmw_buffer_object *vbo;
759 	struct vmw_user_buffer_object *user_bo;
760 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
761 	struct ttm_base_object *buffer_base;
762 	int ret;
763 
764 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
765 	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
766 			       drm_vmw_synccpu_dontblock |
767 			       drm_vmw_synccpu_allow_cs)) != 0) {
768 		DRM_ERROR("Illegal synccpu flags.\n");
769 		return -EINVAL;
770 	}
771 
772 	switch (arg->op) {
773 	case drm_vmw_synccpu_grab:
774 		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
775 					     &buffer_base);
776 		if (unlikely(ret != 0))
777 			return ret;
778 
779 		user_bo = container_of(vbo, struct vmw_user_buffer_object,
780 				       vbo);
781 		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
782 		vmw_bo_unreference(&vbo);
783 		ttm_base_object_unref(&buffer_base);
784 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
785 			     ret != -EBUSY)) {
786 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
787 				  (unsigned int) arg->handle);
788 			return ret;
789 		}
790 		break;
791 	case drm_vmw_synccpu_release:
792 		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
793 						  arg->flags);
794 		if (unlikely(ret != 0)) {
795 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
796 				  (unsigned int) arg->handle);
797 			return ret;
798 		}
799 		break;
800 	default:
801 		DRM_ERROR("Invalid synccpu operation.\n");
802 		return -EINVAL;
803 	}
804 
805 	return 0;
806 }
807 
808 
809 /**
810  * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
811  * allocation functionality.
812  *
813  * @dev: Identifies the drm device.
814  * @data: Pointer to the ioctl argument.
815  * @file_priv: Identifies the caller.
816  * Return: Zero on success, negative error code on error.
817  *
818  * This function checks the ioctl arguments for validity and allocates a
819  * struct vmw_user_buffer_object bo.
820  */
821 int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
822 		       struct drm_file *file_priv)
823 {
824 	struct vmw_private *dev_priv = vmw_priv(dev);
825 	union drm_vmw_alloc_dmabuf_arg *arg =
826 	    (union drm_vmw_alloc_dmabuf_arg *)data;
827 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
828 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
829 	struct vmw_buffer_object *vbo;
830 	uint32_t handle;
831 	int ret;
832 
833 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
834 	if (unlikely(ret != 0))
835 		return ret;
836 
837 	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
838 				req->size, false, &handle, &vbo,
839 				NULL);
840 	if (unlikely(ret != 0))
841 		goto out_no_bo;
842 
843 	rep->handle = handle;
844 	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
845 	rep->cur_gmr_id = handle;
846 	rep->cur_gmr_offset = 0;
847 
848 	vmw_bo_unreference(&vbo);
849 
850 out_no_bo:
851 	ttm_read_unlock(&dev_priv->reservation_sem);
852 
853 	return ret;
854 }
855 
856 
857 /**
858  * vmw_bo_unref_ioctl - Generic handle close ioctl.
859  *
860  * @dev: Identifies the drm device.
861  * @data: Pointer to the ioctl argument.
862  * @file_priv: Identifies the caller.
863  * Return: Zero on success, negative error code on error.
864  *
865  * This function checks the ioctl arguments for validity and closes a
866  * handle to a TTM base object, optionally freeing the object.
867  */
868 int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
869 		       struct drm_file *file_priv)
870 {
871 	struct drm_vmw_unref_dmabuf_arg *arg =
872 	    (struct drm_vmw_unref_dmabuf_arg *)data;
873 
874 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
875 					 arg->handle,
876 					 TTM_REF_USAGE);
877 }
878 
879 
880 /**
881  * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
882  *
883  * @tfile: The TTM object file the handle is registered with.
884  * @handle: The user buffer object handle
885  * @out: Pointer to where a pointer to the embedded
886  * struct vmw_buffer_object should be placed.
887  * @p_base: Pointer to where a pointer to the TTM base object should be
888  * placed, or NULL if no such pointer is required.
889  * Return: Zero on success, Negative error code on error.
890  *
891  * Both the output base object pointer and the vmw buffer object pointer
892  * will be refcounted.
893  */
894 int vmw_user_bo_lookup(struct ttm_object_file *tfile,
895 		       uint32_t handle, struct vmw_buffer_object **out,
896 		       struct ttm_base_object **p_base)
897 {
898 	struct vmw_user_buffer_object *vmw_user_bo;
899 	struct ttm_base_object *base;
900 
901 	base = ttm_base_object_lookup(tfile, handle);
902 	if (unlikely(base == NULL)) {
903 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
904 			  (unsigned long)handle);
905 		return -ESRCH;
906 	}
907 
908 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
909 		ttm_base_object_unref(&base);
910 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
911 			  (unsigned long)handle);
912 		return -EINVAL;
913 	}
914 
915 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
916 				   prime.base);
917 	ttm_bo_get(&vmw_user_bo->vbo.base);
918 	if (p_base)
919 		*p_base = base;
920 	else
921 		ttm_base_object_unref(&base);
922 	*out = &vmw_user_bo->vbo;
923 
924 	return 0;
925 }
926 
927 /**
928  * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
929  * @tfile: The TTM object file the handle is registered with.
930  * @handle: The user buffer object handle.
931  *
932  * This function looks up a struct vmw_user_buffer_object and returns a
933  * pointer to the struct vmw_buffer_object it embeds, without refcounting
934  * the pointer. The returned pointer is only valid until
935  * vmw_user_bo_noref_release() is called, and the object it points to may be
936  * doomed. Any persistent usage of the object requires a refcount to be
937  * taken using ttm_bo_reference_unless_doomed(). If this function returns
938  * successfully, it must be paired with vmw_user_bo_noref_release(), and no
939  * sleeping or scheduling functions may be called in between the two calls.
940  *
941  * Return: A struct vmw_buffer_object pointer if successful or negative
942  * error pointer on failure.
943  */
944 struct vmw_buffer_object *
945 vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
946 {
947 	struct vmw_user_buffer_object *vmw_user_bo;
948 	struct ttm_base_object *base;
949 
950 	base = ttm_base_object_noref_lookup(tfile, handle);
951 	if (!base) {
952 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
953 			  (unsigned long)handle);
954 		return ERR_PTR(-ESRCH);
955 	}
956 
957 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
958 		ttm_base_object_noref_release();
959 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
960 			  (unsigned long)handle);
961 		return ERR_PTR(-EINVAL);
962 	}
963 
964 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
965 				   prime.base);
966 	return &vmw_user_bo->vbo;
967 }
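
/*
 * Example (editor's sketch, not part of the original driver): the noref
 * lookup is meant for short sections that must not sleep, and it has to be
 * paired with vmw_user_bo_noref_release() as described above (that release
 * helper is assumed to be declared in vmwgfx_drv.h). "tfile" and "handle"
 * come from the caller's context.
 *
 *	struct vmw_buffer_object *vbo;
 *
 *	vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *
 *	... record vbo, taking a real refcount if it must outlive this ...
 *
 *	vmw_user_bo_noref_release();
 */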
968 
969 /**
970  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
971  *
972  * @tfile: The TTM object file to register the handle with.
973  * @vbo: The embedded vmw buffer object.
974  * @handle: Pointer to where the new handle should be placed.
975  * Return: Zero on success, Negative error code on error.
976  */
977 int vmw_user_bo_reference(struct ttm_object_file *tfile,
978 			  struct vmw_buffer_object *vbo,
979 			  uint32_t *handle)
980 {
981 	struct vmw_user_buffer_object *user_bo;
982 
983 	if (vbo->base.destroy != vmw_user_bo_destroy)
984 		return -EINVAL;
985 
986 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
987 
988 	*handle = user_bo->prime.base.handle;
989 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
990 				  TTM_REF_USAGE, NULL, false);
991 }
992 
993 
994 /**
995  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
996  *                       object without unreserving it.
997  *
998  * @bo:             Pointer to the struct ttm_buffer_object to fence.
999  * @fence:          Pointer to the fence. If NULL, this function will
1000  *                  insert a fence into the command stream.
1001  *
1002  * Contrary to the ttm_eu version of this function, it takes only
1003  * a single buffer object instead of a list, and it also doesn't
1004  * unreserve the buffer object, which needs to be done separately.
1005  */
1006 void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1007 			 struct vmw_fence_obj *fence)
1008 {
1009 	struct ttm_bo_device *bdev = bo->bdev;
1010 
1011 	struct vmw_private *dev_priv =
1012 		container_of(bdev, struct vmw_private, bdev);
1013 
1014 	if (fence == NULL) {
1015 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1016 		reservation_object_add_excl_fence(bo->resv, &fence->base);
1017 		dma_fence_put(&fence->base);
1018 	} else
1019 		reservation_object_add_excl_fence(bo->resv, &fence->base);
1020 }
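
/*
 * Example (editor's sketch, not part of the original driver): fencing a
 * single reserved buffer after emitting device commands that reference it.
 * Passing a NULL fence makes the function insert a fence itself. "bo" comes
 * from the caller's context and error handling is abbreviated.
 *
 *	ret = ttm_bo_reserve(bo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *
 *	... emit FIFO commands that read from or write to the buffer ...
 *
 *	vmw_bo_fence_single(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */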
1021 
1022 
1023 /**
1024  * vmw_dumb_create - Create a dumb kms buffer
1025  *
1026  * @file_priv: Pointer to a struct drm_file identifying the caller.
1027  * @dev: Pointer to the drm device.
1028  * @args: Pointer to a struct drm_mode_create_dumb structure
1029  * Return: Zero on success, negative error code on failure.
1030  *
1031  * This is a driver callback for the core drm create_dumb functionality.
1032  * Note that this is very similar to the vmw_bo_alloc ioctl, except
1033  * that the arguments have a different format.
1034  */
1035 int vmw_dumb_create(struct drm_file *file_priv,
1036 		    struct drm_device *dev,
1037 		    struct drm_mode_create_dumb *args)
1038 {
1039 	struct vmw_private *dev_priv = vmw_priv(dev);
1040 	struct vmw_buffer_object *vbo;
1041 	int ret;
1042 
1043 	args->pitch = args->width * ((args->bpp + 7) / 8);
1044 	args->size = args->pitch * args->height;
1045 
1046 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1047 	if (unlikely(ret != 0))
1048 		return ret;
1049 
1050 	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1051 				    args->size, false, &args->handle,
1052 				    &vbo, NULL);
1053 	if (unlikely(ret != 0))
1054 		goto out_no_bo;
1055 
1056 	vmw_bo_unreference(&vbo);
1057 out_no_bo:
1058 	ttm_read_unlock(&dev_priv->reservation_sem);
1059 	return ret;
1060 }
1061 
1062 
1063 /**
1064  * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1065  *
1066  * @file_priv: Pointer to a struct drm_file identifying the caller.
1067  * @dev: Pointer to the drm device.
1068  * @handle: Handle identifying the dumb buffer.
1069  * @offset: The address space offset returned.
1070  * Return: Zero on success, negative error code on failure.
1071  *
1072  * This is a driver callback for the core drm dumb_map_offset functionality.
1073  */
1074 int vmw_dumb_map_offset(struct drm_file *file_priv,
1075 			struct drm_device *dev, uint32_t handle,
1076 			uint64_t *offset)
1077 {
1078 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1079 	struct vmw_buffer_object *out_buf;
1080 	int ret;
1081 
1082 	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1083 	if (ret != 0)
1084 		return -EINVAL;
1085 
1086 	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
1087 	vmw_bo_unreference(&out_buf);
1088 	return 0;
1089 }
1090 
1091 
1092 /**
1093  * vmw_dumb_destroy - Destroy a dumb buffer
1094  *
1095  * @file_priv: Pointer to a struct drm_file identifying the caller.
1096  * @dev: Pointer to the drm device.
1097  * @handle: Handle identifying the dumb buffer.
1098  * Return: Zero on success, negative error code on failure.
1099  *
1100  * This is a driver callback for the core drm dumb_destroy functionality.
1101  */
1102 int vmw_dumb_destroy(struct drm_file *file_priv,
1103 		     struct drm_device *dev,
1104 		     uint32_t handle)
1105 {
1106 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1107 					 handle, TTM_REF_USAGE);
1108 }
1109 
1110 
1111 /**
1112  * vmw_bo_swap_notify - swapout notify callback.
1113  *
1114  * @bo: The buffer object to be swapped out.
1115  */
1116 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1117 {
1118 	/* Is @bo embedded in a struct vmw_buffer_object? */
1119 	if (bo->destroy != vmw_bo_bo_free &&
1120 	    bo->destroy != vmw_user_bo_destroy)
1121 		return;
1122 
1123 	/* Kill any cached kernel maps before swapout */
1124 	vmw_bo_unmap(vmw_buffer_object(bo));
1125 }
1126 
1127 
1128 /**
1129  * vmw_bo_move_notify - TTM move_notify_callback
1130  *
1131  * @bo: The TTM buffer object about to move.
1132  * @mem: The struct ttm_mem_reg indicating to what memory
1133  *       region the move is taking place.
1134  *
1135  * Detaches cached maps and device bindings that require that the
1136  * buffer doesn't move.
1137  */
1138 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1139 			struct ttm_mem_reg *mem)
1140 {
1141 	struct vmw_buffer_object *vbo;
1142 
1143 	if (mem == NULL)
1144 		return;
1145 
1146 	/* Is @bo embedded in a struct vmw_buffer_object? */
1147 	if (bo->destroy != vmw_bo_bo_free &&
1148 	    bo->destroy != vmw_user_bo_destroy)
1149 		return;
1150 
1151 	vbo = container_of(bo, struct vmw_buffer_object, base);
1152 
1153 	/*
1154 	 * Kill any cached kernel maps before move to or from VRAM.
1155 	 * With other types of moves, the underlying pages stay the same,
1156 	 * and the map can be kept.
1157 	 */
1158 	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
1159 		vmw_bo_unmap(vbo);
1160 
1161 	/*
1162 	 * If we're moving a backup MOB out of MOB placement, then make sure we
1163 	 * read back all resource content first, and unbind the MOB from
1164 	 * the resource.
1165 	 */
1166 	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
1167 		vmw_resource_unbind_list(vbo);
1168 }
1169