/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

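/*
 * exynos_drm_alloc_buf - back a GEM object with DMA memory.
 *
 * Allocates the buffer with dma_alloc_attrs() according to the EXYNOS_BO_*
 * flags stored in @exynos_gem, then uses the scatter-gather table of the
 * allocation to fill exynos_gem->pages so that individual pages can later
 * be inserted from the fault handler.
 */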
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	enum dma_attr attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG is requested, a fully physically contiguous
	 * memory region is allocated; otherwise the allocation is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is requested, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &exynos_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     &exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
	drm_free_large(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			&exynos_gem->dma_attrs);

	drm_free_large(exynos_gem->pages);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop the reference taken at allocation - the handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

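/*
 * exynos_drm_gem_destroy - free a GEM object and its backing storage.
 *
 * For imported buffers only the dma-buf bookkeeping is torn down; the
 * exporter remains responsible for the memory itself. For locally
 * allocated buffers the DMA memory is released as well.
 */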
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter will release it once the dmabuf's refcount
	 * drops to 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release the file pointer backing the gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

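/*
 * exynos_drm_gem_init - allocate and initialize an exynos GEM object of
 * @size bytes and create its mmap offset, without allocating any backing
 * memory yet.
 */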
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem;
}

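/*
 * exynos_drm_gem_create - create a GEM object with @flags and allocate
 * @size bytes (rounded up to PAGE_SIZE) of backing memory for it.
 * Returns the new object or an ERR_PTR() on failure.
 */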
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	/* set the memory type and cache attributes requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
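
/*
 * Illustrative userspace usage of the create ioctl (a sketch, not part of
 * this driver; it assumes libdrm's drmIoctl() and the uapi definitions
 * from exynos_drm.h, and use_handle() is a hypothetical caller):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);
 */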

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

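/*
 * exynos_drm_gem_put_dma_addr - drop the references held on a GEM object
 * since exynos_drm_gem_get_dma_addr(): one for the lookup done here and
 * one for the lookup kept by the earlier get call.
 */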
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Decrease obj->refcount one more time because it was already
	 * increased in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check that the user-requested size does not exceed the buffer size. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

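/*
 * exynos_gem_map_sgt_with_dma - map an sg_table for DMA on behalf of an
 * importer, serialized by struct_mutex. The matching unmap is
 * exynos_gem_unmap_sgt_from_dma().
 */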
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		/* dma_map_sg() returns 0 on failure; report an error. */
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for a framebuffer.
	 * - this callback is invoked from userspace via the
	 *	DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
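
	/*
	 * Worked example with illustrative values (not taken from any
	 * particular caller, assuming 4 KiB pages): width = 100, bpp = 32
	 * gives pitch = 100 * 4 = 400 bytes; with height = 100 the requested
	 * size is 400 * 100 = 40000 bytes, which exynos_drm_gem_create()
	 * rounds up to 40960 bytes (ten pages).
	 */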

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the fake mmap offset of the memory allocated for a drm
	 * framebuffer.
	 * - this callback is invoked from userspace via the
	 *	DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

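/*
 * exynos_drm_gem_fault - page-fault handler for mmap'ed GEM objects that
 * resolves the faulting offset against exynos_gem->pages and inserts the
 * corresponding page with vm_insert_mixed().
 */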
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

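/*
 * exynos_drm_gem_mmap - mmap file operation: let drm_gem_mmap() set up the
 * vma, pick the page protection from the buffer's EXYNOS_BO_* flags and
 * then map the whole buffer through dma_mmap_attrs().
 */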
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	/* set up the vm_area_struct (vm_ops, vm_private_data, etc.). */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem = to_exynos_gem(obj);

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cacheable by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

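/*
 * exynos_drm_gem_prime_import_sg_table - build a GEM object around an
 * imported dma-buf: record the DMA address of the first sg entry, rebuild
 * the pages array from the sg_table and derive the CONTIG/NONCONTIG flag
 * from the number of sg entries.
 */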
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but for now
		 * assume NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	drm_free_large(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}