/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

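/*
 * exynos_drm_alloc_buf - allocate the backing storage for a GEM object
 *
 * Allocates a DMA buffer with dma_alloc_attrs() according to the
 * EXYNOS_BO_* flags stored in @exynos_gem and fills exynos_gem->pages
 * with the struct page pointers backing it, so the buffer can later be
 * mapped page by page from the fault handler.
 */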
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG is requested, allocate a fully physically
	 * contiguous memory region; otherwise allocate memory that is as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

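/*
 * Release the DMA buffer allocated by exynos_drm_alloc_buf() together
 * with the pages[] array describing it. Does nothing if the buffer was
 * never allocated.
 */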
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}

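/*
 * Create a userspace handle for @obj and drop the allocation reference,
 * leaving the handle as the only reference held on behalf of userspace.
 */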
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the returned handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

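/*
 * exynos_drm_gem_destroy - free a GEM object and its backing storage
 *
 * Imported buffers are handed back to their exporter via
 * drm_prime_gem_destroy(); locally allocated buffers are freed with
 * exynos_drm_free_buf().
 */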
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * Do not release a memory region that came from an exporter;
	 * the exporter will release it once the dmabuf's refcount
	 * drops to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

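/*
 * Allocate the exynos_drm_gem wrapper, initialize the embedded GEM
 * object and create its mmap offset. The actual backing storage is
 * allocated separately by exynos_drm_alloc_buf().
 */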
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

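/*
 * exynos_drm_gem_create - allocate a GEM object with backing storage
 *
 * Validates the EXYNOS_BO_* flags and the requested size, rounds the
 * size up to a page multiple and allocates the DMA buffer. Without an
 * IOMMU, non-contiguous allocations fall back to contiguous ones.
 */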
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set the memory type and cache attributes requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

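/*
 * Userspace reaches exynos_drm_gem_create() through the
 * DRM_IOCTL_EXYNOS_GEM_CREATE ioctl. A rough sketch of the call,
 * with field names as defined in the exynos uapi header:
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = length,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		gem_handle = req.handle;
 */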
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

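/*
 * exynos_drm_gem_get - look up the exynos GEM object for a handle
 *
 * Returns the object with a reference taken by drm_gem_object_lookup();
 * the caller is responsible for dropping that reference when done.
 */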
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

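/*
 * Map the whole DMA buffer into the given vma with dma_mmap_attrs().
 * Mappings larger than the underlying buffer are rejected.
 */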
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for the framebuffer.
	 * This callback is invoked when userspace issues the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

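/*
 * Page fault handler for mmapped GEM buffers: resolve the faulting
 * address to a page in exynos_gem->pages and insert its pfn into the
 * userspace mapping.
 */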
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

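/*
 * Set up the page protection according to the buffer's caching flags
 * and map the backing storage into the vma.
 */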
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cached mapping by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* let the DRM core set up the vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

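/*
 * Wrap an sg_table imported through PRIME in an exynos GEM object.
 * The DMA address is taken from the first entry of the table, and the
 * buffer is flagged as contiguous only when the table has a single
 * entry.
 */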
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* the buffer is always physically contiguous if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but
		 * default to NONCONTIG for now.
		 * TODO: find a way for the exporter to tell the importer
		 * the actual type of its buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}