// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

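/*
 * Fault handler used by armada_gem_vm_ops: the object is backed by a
 * single physically contiguous region, so translate the faulting
 * address into an offset from the object's physical base and insert
 * that pfn directly.
 */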
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

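/*
 * Release an object: drop its mmap offset, free whichever backing store
 * it has (system pages, a node in the linear pool, or an imported
 * dma-buf mapping), then release the GEM core state.
 */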
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	/* Tell lockdep this path can take linear_lock, whatever the backing */
	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

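/*
 * Provide backing store for an object that must be physically contiguous
 * (scanout buffers, cursors): small objects come from the page allocator,
 * everything else from the driver's linear memory pool.
 */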
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

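/*
 * Allocate a GEM object with no shmem backing; the caller supplies the
 * backing store later, e.g. via armada_gem_linear_back() or an imported
 * dma-buf attachment.
 */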
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

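/*
 * Allocate a shmem-backed GEM object; its pages come from the object's
 * shmem mapping and may live in highmem.
 */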
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

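/*
 * Copy user data into a kernel-mapped (linear) object after validating
 * the user pointer and the offset/size against the object; the optional
 * update callback notifies the object's owner that the contents changed.
 */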
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
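/*
 * Build an sg_table for an attached importer.  Three backing stores are
 * handled: shmem pages (pinned and DMA-mapped one page per entry), a
 * single block from the page allocator, and linear pool memory, which
 * has no struct page and is described by its bus address alone.
 */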
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

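/*
 * CPU kmap and userspace mmap of the exported dma-buf are not supported;
 * the stubs below simply fail those requests.
 */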
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

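/*
 * Export an object through PRIME.  The GEM object itself is stashed in
 * exp_info.priv so the map/unmap callbacks above can get back to it.
 */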
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

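/*
 * Import a dma-buf.  Re-importing one of our own exports just takes an
 * extra reference on the underlying GEM object; a foreign buffer gets a
 * private object with the attachment recorded, and the scatterlist is
 * only mapped later, via armada_gem_map_import(), when it is actually
 * needed.
 */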
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

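/*
 * Map an imported dma-buf for DMA.  The display hardware needs one
 * contiguous region, so anything that maps to more than a single
 * scatterlist entry, or to less than the object size, is rejected.
 */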
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}