/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter-gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	bool cached;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	do {								    \
		if (unlikely((is) != (should))) {			    \
			pr_err("magic mismatch: %x expected %x\n",	    \
			       (is), (should));				    \
			BUG();						    \
		}							    \
	} while (0)

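/*
 * Allocate a physically contiguous buffer. In cached mode the pages
 * come from alloc_pages_exact() and are mapped for DMA with
 * dma_map_single(), so the CPU uses cacheable accesses and
 * __videobuf_sync() must run before captured data is read. In the
 * default mode, dma_alloc_coherent() returns a coherent mapping and
 * no explicit sync is needed.
 */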
static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	if (mem->cached) {
		mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
		if (mem->vaddr) {
			int err;

			mem->dma_handle = dma_map_single(dev, mem->vaddr,
							 mem->size,
							 DMA_FROM_DEVICE);
			err = dma_mapping_error(dev, mem->dma_handle);
			if (err) {
				dev_err(dev, "dma_map_single failed\n");

				free_pages_exact(mem->vaddr, mem->size);
				mem->vaddr = NULL;
				return err;
			}
		}
	} else
		mem->vaddr = dma_alloc_coherent(dev, mem->size,
						&mem->dma_handle, flags);

	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %lu failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%lu)\n", mem->vaddr, mem->size);

	return 0;
}

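/*
 * Undo __videobuf_dc_alloc(): unmap and free the pages in cached
 * mode, or release the coherent allocation otherwise.
 */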
static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	if (mem->cached) {
		if (!mem->vaddr)
			return;
		dma_unmap_single(dev, mem->dma_handle, mem->size,
				 DMA_FROM_DEVICE);
		free_pages_exact(mem->vaddr, mem->size);
	} else
		dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	mem->vaddr = NULL;
}

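/*
 * mmap bookkeeping: every VMA cloned from a mapping of a buffer
 * shares one struct videobuf_mapping, reference-counted via
 * vm_open()/vm_close(); the final vm_close() releases the buffers
 * that belong to the mapping.
 */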
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need to cancel any streaming before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if the kernel
				   allocated the memory and it was mmapped.
				   In that case the memory must be freed here
				   so that the mapping can be torn down.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* the free is not atomic, so it must not
				   be done with IRQs disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};
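
/*
 * Note: no .fault handler is needed; __videobuf_mmap_mapper() maps
 * every page of a buffer up front, either with remap_pfn_range() or
 * with vm_insert_page().
 */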

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer.
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = vb->baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	up_read(&mm->mmap_sem);

	return ret;
}

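/*
 * The videobuf_buffer and its videobuf_dma_contig_memory descriptor
 * are allocated together in one kzalloc(); vb->priv points right
 * behind the (driver-sized) buffer struct.
 */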
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		vb->priv = ((char *)vb) + size;
		mem = vb->priv;
		mem->magic = MAGIC_DC_MEM;
		mem->cached = cached;
	}

	return vb;
}

static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
{
	return __videobuf_alloc_vb(size, false);
}

static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
{
	return __videobuf_alloc_vb(size, true);
}

static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

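/*
 * Prepare a buffer for I/O. For V4L2_MEMORY_MMAP the allocation has
 * already been done in __videobuf_mmap_mapper(), so only a sanity
 * check remains. For V4L2_MEMORY_USERPTR either the user-supplied
 * pointer (vb->baddr) is validated, or, when there is none, a kernel
 * buffer is allocated for the read() method.
 */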
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;
	int ret;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		ret = __videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					  GFP_KERNEL);
		if (ret)
			return ret;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}

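/*
 * Only the cached queue variant installs this sync handler (see
 * qops_cached below): dma_map_single() mappings need an explicit
 * dma_sync_single_for_cpu() before the CPU reads captured data,
 * while dma_alloc_coherent() memory does not.
 */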
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);

	return 0;
}

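/*
 * Back an mmap() request with buffer memory: allocate the physically
 * contiguous memory and map it into the calling process, in one go
 * with remap_pfn_range() (uncached) or page by page with
 * vm_insert_page() (cached).
 */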
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;
	unsigned long size;
	unsigned long pos, start = vma->vm_start;
	struct page *page;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
				GFP_KERNEL | __GFP_COMP))
		goto error;

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	if (!mem->cached) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		retval = remap_pfn_range(vma, vma->vm_start,
					 mem->dma_handle >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
		if (retval) {
			dev_err(q->dev, "mmap: remap failed with error %d\n",
				retval);
			__videobuf_dc_free(q->dev, mem);
			goto error;
		}
	} else {
		pos = (unsigned long)mem->vaddr;

		while (size > 0) {
			page = virt_to_page((void *)pos);
			if (NULL == page) {
				dev_err(q->dev, "mmap: virt_to_page failed\n");
				__videobuf_dc_free(q->dev, mem);
				goto error;
			}
			retval = vm_insert_page(vma, start, page);
			if (retval) {
				dev_err(q->dev, "mmap: insert failed with error %d\n",
					retval);
				__videobuf_dc_free(q->dev, mem);
				goto error;
			}
			start += PAGE_SIZE;
			pos += PAGE_SIZE;

			if (size > PAGE_SIZE)
				size -= PAGE_SIZE;
			else
				size = 0;
		}
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize, vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	buf->map = NULL;
	kfree(map);
	return -ENOMEM;
}


static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc_uncached,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

static struct videobuf_qtype_ops qops_cached = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc_cached,
	.iolock		= __videobuf_iolock,
	.sync		= __videobuf_sync,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

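/*
 * A minimal usage sketch for a capture driver. Everything named
 * foo_* below is hypothetical and only illustrates the calling
 * convention; a real driver supplies its own device structure,
 * queue callbacks and buffer struct (which embeds struct
 * videobuf_buffer as its first member):
 *
 *	static const struct videobuf_queue_ops foo_qops = {
 *		.buf_setup	= foo_buf_setup,
 *		.buf_prepare	= foo_buf_prepare,
 *		.buf_queue	= foo_buf_queue,
 *		.buf_release	= foo_buf_release,
 *	};
 *
 *	videobuf_queue_dma_contig_init(&foo->vb_queue, &foo_qops,
 *				       foo->dev, &foo->irqlock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_NONE,
 *				       sizeof(struct foo_buf),
 *				       foo, &foo->lock);
 */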
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
					   const struct videobuf_queue_ops *ops,
					   struct device *dev,
					   spinlock_t *irqlock,
					   enum v4l2_buf_type type,
					   enum v4l2_field field,
					   unsigned int msize,
					   void *priv, struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops_cached, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);

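/*
 * A typical consumer of videobuf_to_dma_contig() is the driver's
 * buf_queue callback, which programs the buffer's bus address into
 * the capture hardware. Sketch with hypothetical names:
 *
 *	static void foo_buf_queue(struct videobuf_queue *q,
 *				  struct videobuf_buffer *vb)
 *	{
 *		struct foo_dev *foo = q->priv_data;
 *
 *		foo_hw_set_dma_addr(foo, videobuf_to_dma_contig(vb));
 *		vb->state = VIDEOBUF_QUEUED;
 *	}
 */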
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped
	   region would be released while still needed. In that case
	   the release must happen inside videobuf_vm_close(), so
	   memory is freed here only if it was allocated for the
	   read() method.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr)
		__videobuf_dc_free(q->dev, mem);
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");