/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should)))	{				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}

static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	mem->vaddr = dma_alloc_coherent(dev, mem->size,
					&mem->dma_handle, flags);

	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %lu failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%lu)\n", mem->vaddr, mem->size);

	return 0;
}

static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	mem->vaddr = NULL;
}

static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need to cancel streaming first, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only when the
				   kernel has allocated the memory and that
				   memory has been mmapped; in that case it
				   must be freed here so the mapping can go
				   away.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* dma_free_coherent() is not atomic, so it
				   can't be called with IRQs disabled.
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = vb->baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	up_read(&mm->mmap_sem);

	return ret;
}
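
/*
 * Illustrative sketch (not part of this helper): the matching user space
 * side of the USERPTR path handled above.  The pointer passed in baddr must
 * come from a physically contiguous, pfn-mapped region (for example an
 * mmap() of memory reserved by the driver); an ordinary malloc() buffer is
 * normally scattered across physical pages and fails the contiguity check
 * with -EFAULT.  The names contig_ptr and contig_size are hypothetical.
 *
 *	struct v4l2_buffer buf;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.type      = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	buf.memory    = V4L2_MEMORY_USERPTR;
 *	buf.index     = 0;
 *	buf.m.userptr = (unsigned long)contig_ptr;
 *	buf.length    = contig_size;
 *
 *	if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
 *		perror("VIDIOC_QBUF");
 */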

static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		vb->priv = ((char *)vb) + size;
		mem = vb->priv;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;
	unsigned long size;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
				GFP_KERNEL | __GFP_COMP))
		goto error;

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* In v4l2, vm_pgoff is only used to look up the buffer data
	 * structure that was allocated earlier; it does not carry the
	 * usual meaning of an offset from the start of the physical
	 * buffer.  Set it to 0 so the sanity check in vm_iomap_memory()
	 * passes.
	 */
	vma->vm_pgoff = 0;

	retval = vm_iomap_memory(vma, mem->dma_handle, size);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d\n", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize, vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}
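
/*
 * Illustrative sketch (not part of this helper): the matching user space
 * side of the MMAP path.  After VIDIOC_REQBUFS and VIDIOC_QUERYBUF, the
 * buffer is mapped using the offset reported in buf.m.offset, which is how
 * the core ends up calling __videobuf_mmap_mapper() above.  Error handling
 * is trimmed for brevity.
 *
 *	struct v4l2_buffer buf;
 *	void *start;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	buf.memory = V4L2_MEMORY_MMAP;
 *	buf.index  = 0;
 *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *
 *	start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, buf.m.offset);
 */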

static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
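
/*
 * Illustrative sketch (not part of this helper): how a capture driver would
 * typically hook a queue up to these helpers, e.g. from its open() handler.
 * The names fh, mydrv_qops, dev->irqlock, pdev and struct mydrv_buffer (the
 * driver's per-buffer structure, embedding struct videobuf_buffer as its
 * first member) are hypothetical driver-side objects; the queue operations
 * themselves are implemented by the driver.
 *
 *	videobuf_queue_dma_contig_init(&fh->vb_q, &mydrv_qops,
 *				       &pdev->dev, &dev->irqlock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_NONE,
 *				       sizeof(struct mydrv_buffer),
 *				       fh, NULL);
 */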

dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
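
/*
 * Illustrative sketch (not part of this helper): once a buffer has been
 * queued, a driver can fetch its bus address and program the capture
 * engine, typically from its buf_queue callback.  struct mydrv and
 * mydrv_write_dma_addr() are hypothetical placeholders for the
 * hardware-specific part.
 *
 *	static void mydrv_buf_queue(struct videobuf_queue *vq,
 *				    struct videobuf_buffer *vb)
 *	{
 *		struct mydrv *dev = vq->priv_data;
 *
 *		mydrv_write_dma_addr(dev, videobuf_to_dma_contig(vb));
 *		vb->state = VIDEOBUF_ACTIVE;
 *	}
 */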

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped
	   region would be released while it is still needed.  In that
	   case the memory is released inside videobuf_vm_close().
	   So free the memory here only if it was allocated for the
	   read() method.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr) {
		__videobuf_dc_free(q->dev, mem);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");