/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

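/*
 * Fault handler for mmap'ed GEM objects: objects mapped this way are
 * physically contiguous, so the faulting page is resolved by offsetting
 * the object's base PFN and inserting it directly into the VMA.
 */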
static int armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

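/*
 * Release a GEM object: drop its mmap offset, free whichever backing store
 * the object has (page allocation, linear region, or imported dma-buf
 * attachment), then release the base GEM object itself.
 */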
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

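/*
 * Provide a physically contiguous backing for @obj.  Small objects come
 * from the page allocator; everything else is carved out of the linear
 * region managed by priv->linear.  Returns 0 if the object already has a
 * backing or one was successfully allocated.
 */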
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically a cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system
	 * page allocator.  Framebuffers will never be this small (our
	 * minimum size for framebuffers is larger than this anyway).
	 * Such objects are only accessed by the CPU, so we don't need
	 * any special handling here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; there
	 * may be some kind of remapping between the device and system
	 * RAM, which makes the device address equally unsafe to reuse
	 * as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

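/*
 * Return a CPU mapping of the object, creating one with ioremap_wc() for
 * linear-backed objects on first use.  Page-backed objects already have
 * their kernel address set at allocation time.
 */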
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

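/*
 * Allocate a GEM object with no shmem backing; the caller is expected to
 * attach a backing store later (linear memory or an imported dma-buf).
 */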
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

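/*
 * Allocate a fully fledged GEM object backed by shmem: pages are allocated
 * on demand from the object's mapping and may sit in highmem.
 */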
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
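/*
 * Create a dumb buffer: compute the pitch and size for the requested
 * dimensions, back it with contiguous (linear) memory, and return a
 * handle to userspace.
 */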
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
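/*
 * Create a shmem-backed object of the requested size and return a handle
 * to it.
 */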
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

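/*
 * Copy data from userspace into an object.  Only objects with a kernel
 * mapping (dobj->addr set) can be written this way; the optional update
 * callback lets the owner react to the new contents.
 */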
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
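/*
 * Build an sg_table for a dma-buf importer.  Shmem-backed objects have
 * their pages read in and DMA-mapped; a page-backed object becomes a
 * single DMA-mapped entry; a linear object's device address is filled in
 * directly without going through the DMA API.
 */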
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

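/*
 * Undo armada_gem_prime_map_dma_buf(): DMA-unmap the table (except for
 * linear objects, which were never mapped), drop any shmem page
 * references, and free the sg_table.
 */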
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

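/*
 * CPU access through the exported dma-buf is not supported: the kmap
 * handlers return NULL and mmap fails with -EINVAL.
 */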
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map_atomic	= armada_gem_dmabuf_no_kmap,
	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

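/*
 * Export a GEM object as a dma-buf, using the ops above so that importers
 * map the object through armada_gem_prime_map_dma_buf().
 */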
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

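/*
 * Import a dma-buf.  Re-importing one of our own buffers short-circuits
 * to the underlying GEM object; anything else gets a private GEM object
 * with an attachment that is mapped later via armada_gem_map_import().
 */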
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

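/*
 * Map an imported dma-buf for DMA.  The driver requires a single
 * contiguous region, so a scattered mapping or one smaller than the
 * object is rejected.
 */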
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}