/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

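/*
 * A tegra_bo embeds a host1x_bo as its "base" member, so a host1x buffer
 * handle can be converted back to the containing GEM object with
 * container_of().
 */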
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

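/*
 * host1x buffer reference counting is implemented on top of the GEM
 * object's reference count; struct_mutex is taken here because
 * drm_gem_object_unreference() must be called with it held.
 */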
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

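/*
 * Buffers are allocated from physically contiguous memory, so pinning is
 * a no-op: the DMA address is already known and never changes, and no
 * scatter-gather table needs to be returned.
 */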
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

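/*
 * The kernel virtual mapping is set up once at allocation time, so the
 * mmap/kmap callbacks only hand out (offsets into) that existing mapping
 * and their teardown counterparts have nothing to do.
 */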
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

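/*
 * Operations used by host1x to manage buffer objects owned by the Tegra
 * DRM driver.
 */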
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}

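/*
 * Allocate a buffer object of the given size (rounded up to a whole number
 * of pages) from contiguous, write-combined memory. Returns the new object
 * or an ERR_PTR()-encoded error code on failure.
 */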
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!bo->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		err = -ENOMEM;
		goto err_dma;
	}

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err)
		goto err_init;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err)
		goto err_mmap;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

err_mmap:
	drm_gem_object_release(&bo->gem);
err_init:
	tegra_bo_destroy(drm, bo);
err_dma:
	kfree(bo);

	return ERR_PTR(err);
}

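/*
 * Allocate a buffer object and immediately create a userspace handle for
 * it. drm_gem_handle_create() takes a reference of its own, so the
 * allocation reference is dropped before returning and the object lives
 * only as long as the handle (or another reference) does.
 */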
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     unsigned int size,
					     unsigned long flags,
					     unsigned int *handle)
{
	struct tegra_bo *bo;
	int ret;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	ret = drm_gem_handle_create(file, &bo->gem, handle);
	if (ret)
		goto err;

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;

err:
	tegra_bo_free_object(&bo->gem);
	return ERR_PTR(ret);
}

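/*
 * Wrap a foreign dma-buf in a buffer object. The driver does not use an
 * IOMMU at this point, so only buffers that are physically contiguous,
 * i.e. whose scatter-gather table contains a single entry, can be
 * imported.
 */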
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	/* hold a reference to the dma-buf for the lifetime of the import */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	/* without an IOMMU only physically contiguous buffers can be used */
	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}

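/*
 * Final teardown of a buffer object: imported buffers are unmapped and
 * detached from their exporter via drm_prime_gem_destroy(), while natively
 * allocated buffers have their backing memory freed.
 */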
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_destroy(gem->dev, bo);
	}

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);

	kfree(bo);
}

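/*
 * Implementation of the DRM_IOCTL_MODE_CREATE_DUMB ioctl: the pitch is
 * raised to at least the byte width of the buffer, rounded up to the
 * hardware pitch alignment, and the size to at least pitch * height,
 * before the buffer is allocated.
 */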
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	min_pitch = round_up(min_pitch, tegra->pitch_align);
	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

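/*
 * Implementation of the DRM_IOCTL_MODE_MAP_DUMB ioctl: look up the GEM
 * object for a handle and report the fake mmap offset that userspace must
 * pass to mmap() on the DRM file descriptor.
 */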
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

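/*
 * drm_gem_mmap() looks up the object and sets up the VMA but does not
 * populate it. Because the backing memory is physically contiguous, the
 * whole range can then be mapped with a single remap_pfn_range() call.
 */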
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret) {
		/* drop the object reference taken by drm_gem_mmap() */
		drm_gem_vm_close(vma);
	}

	return ret;
}

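/*
 * dma-buf exporter callbacks. Since buffers are physically contiguous,
 * mapping an attachment reduces to building a single-entry scatter-gather
 * table covering the whole buffer.
 */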
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

	return sgt;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

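/*
 * CPU access to exported buffers through the kmap and mmap interfaces is
 * not implemented; the stubs below simply report failure.
 */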
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

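/*
 * Export a GEM object as a dma-buf that other devices can attach to and
 * map. The trailing NULL is the optional reservation object.
 */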
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}

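/*
 * Import a dma-buf as a GEM object. If the buffer was exported by this
 * very device, the underlying GEM object is reused directly (taking an
 * additional reference) instead of creating a new import attachment.
 */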
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
449