/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

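/*
 * Fault handler for mmap'ed GEM objects: work out which page of the
 * object's contiguous physical memory is being touched and insert that
 * pfn directly into the faulting user mapping.
 */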
static int armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

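/*
 * Release an object's backing store: free system pages, return linear
 * space to the allocator, and unmap/release any imported dma-buf.
 */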
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

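/*
 * Give an object physically contiguous backing.  Small objects
 * (typically cursors) come straight from the page allocator; everything
 * else is carved out of the device's linear memory region and zeroed
 * through a temporary write-combining mapping.
 */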
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

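/*
 * Return a CPU mapping of the object, creating a write-combining
 * ioremap for linear objects on first use.
 */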
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

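/*
 * Allocate a GEM object without shmem backing; the caller attaches the
 * real backing store later (linear memory or an imported dma-buf).
 */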
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

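/* Allocate a shmem-backed GEM object, as used by the create ioctl. */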
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
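/*
 * Dumb buffers are allocated through armada_gem_linear_back(), so they
 * always end up in physically contiguous memory suitable for scanout.
 */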
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

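/*
 * Write userspace data into an object that has a kernel mapping
 * (linear or page backed); objects without one are rejected with
 * -EINVAL.
 *
 * A rough userspace sketch, illustrative only (field names taken from
 * the uapi structure used below):
 *
 *	struct drm_armada_gem_pwrite pw = {
 *		.ptr	= (uint64_t)(uintptr_t)data,
 *		.handle	= handle,
 *		.offset	= 0,
 *		.size	= len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_PWRITE, &pw);
 */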
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
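/*
 * Build a scatterlist for an exported object: shmem objects are pinned
 * page by page, page-backed objects are described by a single entry,
 * and linear objects carry no struct pages, so only the DMA address
 * and length are filled in without calling dma_map_sg().
 */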
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map_atomic	= armada_gem_dmabuf_no_kmap,
	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

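/* Export a GEM object as a dma-buf using the ops above. */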
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

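/*
 * Import a dma-buf.  Re-importing one of our own buffers just takes a
 * reference on the underlying GEM object; foreign buffers are attached
 * here but only mapped later via armada_gem_map_import().
 */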
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

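/*
 * Map an imported dma-buf for DMA.  The display hardware requires a
 * single contiguous region, so mappings with more than one scatterlist
 * entry, or shorter than the object, are rejected.
 */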
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}