/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

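/*
 * Allocate the backing storage of a GEM object with dma_alloc_attrs()
 * and build a pages array from the resulting sgtable so the buffer can
 * also be faulted in page by page. DMA_ATTR_NO_KERNEL_MAPPING is set
 * because the kernel itself never touches the buffer; only the opaque
 * cookie returned by the allocator is kept for the later
 * dma_mmap_attrs()/dma_free_attrs() calls.
 */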
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is requested, allocate a fully physically
	 * contiguous memory region; otherwise allocate memory that is as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is requested, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &exynos_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     &exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);

	return ret;
}

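/* Release the buffer allocated by exynos_drm_alloc_buf(). */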
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
			exynos_gem->dma_addr, &exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}

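/*
 * Create a userspace handle for @obj and drop the allocation-time
 * reference, leaving the handle as the only reference held.
 */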
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id from the idr table in which the object is
	 * registered; the handle passed back to userspace is that id.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

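/* Free a GEM object together with its backing storage (or drop the import). */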
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %u\n", obj->handle_count);

	/*
	 * Do not release an imported memory region here: the exporter
	 * will release it once the dmabuf's refcount drops to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release the file pointer of the gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

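/*
 * Look up a GEM object by handle and return the size of its buffer,
 * or 0 if the handle is invalid.
 */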
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

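/*
 * Allocate an exynos_drm_gem, initialize the embedded GEM object with
 * shmem backing and reserve a fake mmap offset for it.
 */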
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem;
}

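/* Create a GEM object of @size bytes and allocate its backing buffer. */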
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

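/*
 * DRM_IOCTL_EXYNOS_GEM_CREATE: allocate a buffer and return a handle
 * for it. Rough userspace usage (illustrative sketch only):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = len,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *
 * On success, req.handle names the new buffer.
 */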
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

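/*
 * Look up a GEM object and return a pointer to its device address.
 * The lookup takes a reference on the object; the caller must drop it
 * with exynos_drm_gem_put_dma_addr() when the address is no longer used.
 */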
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

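/*
 * Drop the reference taken by exynos_drm_gem_get_dma_addr(). The handle
 * must still be valid so that the object can be looked up again here.
 */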
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

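/*
 * Map the whole buffer into the given vma through the DMA API, using
 * the attributes the buffer was allocated with.
 */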
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	/*
	 * The cpu_addr argument must be the cookie returned by
	 * dma_alloc_attrs(), not the pages array built from the sgtable.
	 */
	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

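/* DRM_IOCTL_EXYNOS_GEM_GET: report the flags and size of a buffer. */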
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

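/* Map an sgtable for device access through the DMA API. */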
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		/* dma_map_sg() returns 0 on failure; report a real error. */
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

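/* drm_driver.gem_free_object callback. */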
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for the framebuffer.
	 * - This callback is invoked by userspace through the
	 *   DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the fake mmap offset of the memory allocated for the
	 * framebuffer.
	 * - This callback is invoked by userspace through the
	 *   DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

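/*
 * Fault handler for mmapped GEM objects: insert the single page that
 * covers the faulting address into the vma.
 */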
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

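/*
 * file_operations.mmap handler: let drm_gem_mmap() set up the vma,
 * choose the page protection from the buffer flags and then map the
 * whole buffer up front.
 */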
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * drm_gem_mmap() looks up the object from the fake mmap offset
	 * and sets up vma->vm_private_data and vma->vm_ops.
	 */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem = to_exynos_gem(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* A non-cached mapping is the default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

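/*
 * Import a PRIME buffer: wrap the given sgtable in a new GEM object and
 * rebuild the pages array from it.
 */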
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* Memory is always physically contiguous if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for
		 * now assume NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

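/*
 * Buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so there is no
 * kernel virtual address to hand out here.
 */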
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}