/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "radeon.h"
#include <drm/radeon_drm.h>

#include <linux/dma-buf.h>

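/*
 * radeon_gem_map_dma_buf - map_dma_buf callback for exported buffers
 *
 * Builds a scatter/gather table from the BO's backing pages and maps it
 * for DMA by the importing device.  The pages are resident in GTT because
 * radeon_gem_prime_export() pins the BO there before exporting.
 */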
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	if (IS_ERR_OR_NULL(sg)) {
		mutex_unlock(&dev->struct_mutex);
		return sg ? sg : ERR_PTR(-ENOMEM);
	}

	/* dma_map_sg() returns 0 on failure */
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	if (!nents) {
		sg_free_table(sg);
		kfree(sg);
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(-ENOMEM);
	}
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

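/*
 * radeon_gem_unmap_dma_buf - tear down the DMA mapping created by
 * radeon_gem_map_dma_buf() and free the scatter/gather table.
 */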
static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

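/*
 * radeon_gem_dmabuf_release - called when the last reference to the
 * dma-buf is dropped; releases the GEM reference held for the export.
 */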
static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

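/*
 * Page-wise CPU access (kmap/kmap_atomic) is not implemented; the stubs
 * below return NULL so importers know the interface is unavailable.
 * CPU access is provided through the vmap interface instead.
 */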
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

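/* mapping an exported dma-buf into userspace is not supported */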
static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

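/*
 * radeon_gem_prime_vmap - map the whole BO into the kernel address space.
 *
 * The mapping is refcounted via vmapping_count under struct_mutex, so
 * nested vmap/vunmap calls share a single TTM kmap of the object.
 */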
static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (bo->vmapping_count) {
		bo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	bo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return bo->dma_buf_vmap.virtual;
}

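/*
 * radeon_gem_prime_vunmap - drop one vmap reference and tear down the
 * kernel mapping when the last reference goes away.
 */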
static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	mutex_lock(&dev->struct_mutex);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0)
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	mutex_unlock(&dev->struct_mutex);
}
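
/* dma-buf operations used for all radeon-exported buffers */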
static const struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
	.mmap = radeon_gem_prime_mmap,
	.vmap = radeon_gem_prime_vmap,
	.vunmap = radeon_gem_prime_vunmap,
};

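/*
 * radeon_prime_create - wrap an imported sg_table in a new GTT BO and
 * track it on the device's GEM object list.
 */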
static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

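/*
 * radeon_gem_prime_export - export a radeon GEM object as a dma-buf.
 *
 * The BO is pinned into GTT so that the scatter/gather mapping handed
 * out to importers stays valid while the buffer is shared.
 */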
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct dma_buf *buf;
	int ret;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret) {
		radeon_bo_unreserve(bo);
		return ERR_PTR(ret);
	}
	radeon_bo_unreserve(bo);

	buf = dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
	if (IS_ERR(buf)) {
		/* don't leak the GTT pin if the export itself failed */
		if (radeon_bo_reserve(bo, false) == 0) {
			radeon_bo_unpin(bo);
			radeon_bo_unreserve(bo);
		}
	}
	return buf;
}

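/*
 * radeon_gem_prime_import - import a dma-buf as a radeon GEM object.
 *
 * If the dma-buf was exported by this same device, the underlying BO is
 * simply re-referenced (self-import fast path); otherwise the buffer is
 * attached, mapped, and wrapped in a newly created GTT BO.
 */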
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			dma_buf_put(dma_buf);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}