/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include "virtgpu_drv.h"

/* Empty implementations, as there should not be any other driver for a
 * virtual device that might share buffers with virtgpu.
 */

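/* Export path: hand the pages backing the TTM object to
 * drm_prime_pages_to_sg() so an importer can map the buffer.
 */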
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
		/* should not happen */
		return ERR_PTR(-EINVAL);

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
				     bo->tbo.ttm->num_pages);
}

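/* Import path: not supported, since no other driver is expected to share
 * buffers with a virtual GPU (see the comment above).
 */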
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}

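/* Map the object into kernel address space via virtio_gpu_object_kmap()
 * and return the cached vmap pointer, or NULL on failure.
 */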
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret;

	ret = virtio_gpu_object_kmap(bo);
	if (ret)
		return NULL;
	return bo->vmap;
}

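/* Release the kernel mapping set up by virtgpu_gem_prime_vmap(). */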
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
}

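/* Userspace mapping: virtio-gpu objects are mmapped through the TTM
 * vma_node, so copy its offset into the GEM vma_node before calling the
 * generic drm_gem_prime_mmap() helper.
 */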
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
	return drm_gem_prime_mmap(obj, vma);
}