1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * helper functions for physically contiguous capture buffers
4  *
5  * The functions support hardware lacking scatter gather support
6  * (i.e. the buffers must be linear in physical memory)
7  *
8  * Copyright (c) 2008 Magnus Damm
9  *
10  * Based on videobuf-vmalloc.c,
11  * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
12  */
13 
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/mm.h>
17 #include <linux/pagemap.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <media/videobuf-dma-contig.h>
22 
23 struct videobuf_dma_contig_memory {
24 	u32 magic;
25 	void *vaddr;
26 	dma_addr_t dma_handle;
27 	unsigned long size;
28 };
29 
30 #define MAGIC_DC_MEM 0x0733ac61
31 #define MAGIC_CHECK(is, should)						    \
32 	if (unlikely((is) != (should)))	{				    \
33 		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
34 		BUG();							    \
35 	}
36 
__videobuf_dc_alloc(struct device * dev,struct videobuf_dma_contig_memory * mem,unsigned long size)37 static int __videobuf_dc_alloc(struct device *dev,
38 			       struct videobuf_dma_contig_memory *mem,
39 			       unsigned long size)
40 {
41 	mem->size = size;
42 	mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
43 					GFP_KERNEL);
44 	if (!mem->vaddr) {
45 		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
46 		return -ENOMEM;
47 	}
48 
49 	dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
50 
51 	return 0;
52 }
53 
/* Free a DMA-coherent buffer allocated by __videobuf_dc_alloc(). */
static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	/* clear the pointer so callers can tell the buffer is gone */
	mem->vaddr = NULL;
}
61 
/*
 * VMA open callback: a new reference to the mapping appeared (mmap() or
 * fork()), so take an extra reference on the videobuf mapping.
 * Note: the debug message prints the count *before* the increment.
 */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}
71 
videobuf_vm_close(struct vm_area_struct * vma)72 static void videobuf_vm_close(struct vm_area_struct *vma)
73 {
74 	struct videobuf_mapping *map = vma->vm_private_data;
75 	struct videobuf_queue *q = map->q;
76 	int i;
77 
78 	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
79 		map, map->count, vma->vm_start, vma->vm_end);
80 
81 	map->count--;
82 	if (0 == map->count) {
83 		struct videobuf_dma_contig_memory *mem;
84 
85 		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
86 		videobuf_queue_lock(q);
87 
88 		/* We need first to cancel streams, before unmapping */
89 		if (q->streaming)
90 			videobuf_queue_cancel(q);
91 
92 		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
93 			if (NULL == q->bufs[i])
94 				continue;
95 
96 			if (q->bufs[i]->map != map)
97 				continue;
98 
99 			mem = q->bufs[i]->priv;
100 			if (mem) {
101 				/* This callback is called only if kernel has
102 				   allocated memory and this memory is mmapped.
103 				   In this case, memory should be freed,
104 				   in order to do memory unmap.
105 				 */
106 
107 				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
108 
109 				/* vfree is not atomic - can't be
110 				   called with IRQ's disabled
111 				 */
112 				dev_dbg(q->dev, "buf[%d] freeing %p\n",
113 					i, mem->vaddr);
114 
115 				__videobuf_dc_free(q->dev, mem);
116 				mem->vaddr = NULL;
117 			}
118 
119 			q->bufs[i]->map = NULL;
120 			q->bufs[i]->baddr = 0;
121 		}
122 
123 		kfree(map);
124 
125 		videobuf_queue_unlock(q);
126 	}
127 }
128 
/* VMA callbacks installed on mappings created by __videobuf_mmap_mapper() */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};
133 
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function forgets a previously set up user space buffer by
 * clearing the cached DMA handle and size.  Nothing was allocated for
 * the USERPTR case, so there is nothing to free here.
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}
145 
146 /**
147  * videobuf_dma_contig_user_get() - setup user space memory pointer
148  * @mem: per-buffer private videobuf-dma-contig data
149  * @vb: video buffer to map
150  *
151  * This function validates and sets up a pointer to user space memory.
152  * Only physically contiguous pfn-mapped memory is accepted.
153  *
154  * Returns 0 if successful.
155  */
videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory * mem,struct videobuf_buffer * vb)156 static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
157 					struct videobuf_buffer *vb)
158 {
159 	unsigned long untagged_baddr = untagged_addr(vb->baddr);
160 	struct mm_struct *mm = current->mm;
161 	struct vm_area_struct *vma;
162 	unsigned long prev_pfn, this_pfn;
163 	unsigned long pages_done, user_address;
164 	unsigned int offset;
165 	int ret;
166 
167 	offset = untagged_baddr & ~PAGE_MASK;
168 	mem->size = PAGE_ALIGN(vb->size + offset);
169 	ret = -EINVAL;
170 
171 	mmap_read_lock(mm);
172 
173 	vma = find_vma(mm, untagged_baddr);
174 	if (!vma)
175 		goto out_up;
176 
177 	if ((untagged_baddr + mem->size) > vma->vm_end)
178 		goto out_up;
179 
180 	pages_done = 0;
181 	prev_pfn = 0; /* kill warning */
182 	user_address = untagged_baddr;
183 
184 	while (pages_done < (mem->size >> PAGE_SHIFT)) {
185 		ret = follow_pfn(vma, user_address, &this_pfn);
186 		if (ret)
187 			break;
188 
189 		if (pages_done == 0)
190 			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
191 		else if (this_pfn != (prev_pfn + 1))
192 			ret = -EFAULT;
193 
194 		if (ret)
195 			break;
196 
197 		prev_pfn = this_pfn;
198 		user_address += PAGE_SIZE;
199 		pages_done++;
200 	}
201 
202 out_up:
203 	mmap_read_unlock(current->mm);
204 
205 	return ret;
206 }
207 
/*
 * Allocate a videobuf_buffer with our private memory descriptor placed
 * in the same allocation, directly behind the @size bytes of the buffer
 * structure itself.  Returns NULL on allocation failure.
 */
static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return NULL;

	/* the private descriptor follows the buffer struct */
	mem = (struct videobuf_dma_contig_memory *)((char *)vb + size);
	vb->priv = mem;
	mem->magic = MAGIC_DC_MEM;

	return vb;
}
222 
/* Return the kernel virtual address of @buf (NULL when not allocated). */
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}
232 
__videobuf_iolock(struct videobuf_queue * q,struct videobuf_buffer * vb,struct v4l2_framebuffer * fbuf)233 static int __videobuf_iolock(struct videobuf_queue *q,
234 			     struct videobuf_buffer *vb,
235 			     struct v4l2_framebuffer *fbuf)
236 {
237 	struct videobuf_dma_contig_memory *mem = vb->priv;
238 
239 	BUG_ON(!mem);
240 	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
241 
242 	switch (vb->memory) {
243 	case V4L2_MEMORY_MMAP:
244 		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
245 
246 		/* All handling should be done by __videobuf_mmap_mapper() */
247 		if (!mem->vaddr) {
248 			dev_err(q->dev, "memory is not allocated/mmapped.\n");
249 			return -EINVAL;
250 		}
251 		break;
252 	case V4L2_MEMORY_USERPTR:
253 		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
254 
255 		/* handle pointer from user space */
256 		if (vb->baddr)
257 			return videobuf_dma_contig_user_get(mem, vb);
258 
259 		/* allocate memory for the read() method */
260 		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
261 			return -ENOMEM;
262 		break;
263 	case V4L2_MEMORY_OVERLAY:
264 	default:
265 		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
266 		return -EINVAL;
267 	}
268 
269 	return 0;
270 }
271 
__videobuf_mmap_mapper(struct videobuf_queue * q,struct videobuf_buffer * buf,struct vm_area_struct * vma)272 static int __videobuf_mmap_mapper(struct videobuf_queue *q,
273 				  struct videobuf_buffer *buf,
274 				  struct vm_area_struct *vma)
275 {
276 	struct videobuf_dma_contig_memory *mem;
277 	struct videobuf_mapping *map;
278 	int retval;
279 
280 	dev_dbg(q->dev, "%s\n", __func__);
281 
282 	/* create mapping + update buffer list */
283 	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
284 	if (!map)
285 		return -ENOMEM;
286 
287 	buf->map = map;
288 	map->q = q;
289 
290 	buf->baddr = vma->vm_start;
291 
292 	mem = buf->priv;
293 	BUG_ON(!mem);
294 	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
295 
296 	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
297 		goto error;
298 
299 	/* the "vm_pgoff" is just used in v4l2 to find the
300 	 * corresponding buffer data structure which is allocated
301 	 * earlier and it does not mean the offset from the physical
302 	 * buffer start address as usual. So set it to 0 to pass
303 	 * the sanity check in dma_mmap_coherent().
304 	 */
305 	vma->vm_pgoff = 0;
306 	retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
307 				   mem->size);
308 	if (retval) {
309 		dev_err(q->dev, "mmap: remap failed with error %d. ",
310 			retval);
311 		dma_free_coherent(q->dev, mem->size,
312 				  mem->vaddr, mem->dma_handle);
313 		goto error;
314 	}
315 
316 	vma->vm_ops = &videobuf_vm_ops;
317 	vm_flags_set(vma, VM_DONTEXPAND);
318 	vma->vm_private_data = map;
319 
320 	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
321 		map, q, vma->vm_start, vma->vm_end,
322 		(long int)buf->bsize, vma->vm_pgoff, buf->i);
323 
324 	videobuf_vm_open(vma);
325 
326 	return 0;
327 
328 error:
329 	kfree(map);
330 	return -ENOMEM;
331 }
332 
/* Memory operations plugged into the videobuf core for dma-contig queues */
static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};
340 
/*
 * videobuf_queue_dma_contig_init() - initialize a videobuf queue whose
 * buffers are physically contiguous DMA-coherent memory.
 *
 * Thin wrapper around videobuf_queue_core_init() that plugs in the
 * dma-contig memory operations (qops); all other arguments are passed
 * through unchanged.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
355 
/* Return the DMA (bus) address of @buf for programming into hardware. */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
366 
videobuf_dma_contig_free(struct videobuf_queue * q,struct videobuf_buffer * buf)367 void videobuf_dma_contig_free(struct videobuf_queue *q,
368 			      struct videobuf_buffer *buf)
369 {
370 	struct videobuf_dma_contig_memory *mem = buf->priv;
371 
372 	/* mmapped memory can't be freed here, otherwise mmapped region
373 	   would be released, while still needed. In this case, the memory
374 	   release should happen inside videobuf_vm_close().
375 	   So, it should free memory only if the memory were allocated for
376 	   read() operation.
377 	 */
378 	if (buf->memory != V4L2_MEMORY_USERPTR)
379 		return;
380 
381 	if (!mem)
382 		return;
383 
384 	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
385 
386 	/* handle user space pointer case */
387 	if (buf->baddr) {
388 		videobuf_dma_contig_user_put(mem);
389 		return;
390 	}
391 
392 	/* read() method */
393 	if (mem->vaddr) {
394 		__videobuf_dc_free(q->dev, mem);
395 		mem->vaddr = NULL;
396 	}
397 }
398 EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
399 
/* Module metadata */
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
403