/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

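/*
 * Fault handler for userspace mappings of GEM objects: resolve the faulting
 * address to the corresponding page of the object's physical memory and
 * insert it with vm_insert_pfn().
 */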
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, addr, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		drm_mm_remove_node(dobj->linear);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

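/*
 * Provide backing store suitable for the display hardware.  Small objects
 * (typically cursors) are allocated from the page allocator; anything larger
 * is carved out of the driver's linear memory pool and cleared through a
 * temporary write-combining mapping.
 */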
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (e.g., as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&dev->struct_mutex);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&dev->struct_mutex);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&dev->struct_mutex);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

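/* Return a kernel mapping of the object, creating one for linear objects. */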
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

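/*
 * Allocate a GEM object without shmem backing; the backing store is provided
 * separately, either by armada_gem_linear_back() or by an imported dma-buf.
 */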
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

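/* Allocate a shmem-backed GEM object whose pages are allocated on demand. */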
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = file_inode(obj->obj.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
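/*
 * Create a dumb buffer: allocate a private object, back it with memory
 * suitable for scanout, and return a handle for it to userspace.
 */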
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

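/*
 * Report the fake mmap offset for a handle so that userspace can mmap() the
 * object through the DRM device node.  Imported objects cannot be mapped.
 */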
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = armada_gem_object_lookup(dev, file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference(&obj->obj);
 err_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

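/* Dumb buffers are destroyed by simply dropping their handle. */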
int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
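/*
 * Create a shmem-backed object of the requested size and return a handle
 * for it; the backing pages are allocated on demand.
 */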
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;


	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

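/*
 * Copy data from userspace into a kernel-mapped object and, if the owner
 * registered an update callback, invoke it so the hardware can be refreshed.
 */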
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
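/*
 * Build a scatterlist for an exported object: shmem objects are mapped page
 * by page, page-backed objects as a single entry, and linear objects are
 * described directly by their device address with no struct pages.
 */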
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = file_inode(dobj->obj.filp)->i_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		page_cache_release(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

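/* Undo armada_gem_prime_map_dma_buf(): unmap, drop page references, free. */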
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			page_cache_release(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

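/* Export a GEM object as a dma-buf using the ops above. */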
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
			      O_RDWR, NULL);
}

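/*
 * Import a dma-buf.  Self-imports just take a reference on the existing GEM
 * object; otherwise attach to the buffer and defer the actual mapping until
 * armada_gem_map_import() is called.
 */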
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

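/*
 * Map an imported dma-buf for device access.  The display hardware requires
 * a single contiguous region, so reject scatterlists with more than one
 * entry or buffers smaller than the object.
 */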
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (!dobj->sgt) {
		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
		return -EINVAL;
	}
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}