// SPDX-License-Identifier: GPL-2.0-only
/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should)))	{				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}

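/**
 * __videobuf_dc_alloc() - allocate a physically contiguous buffer
 * @dev: device that will perform the DMA
 * @mem: per-buffer private videobuf-dma-contig data
 * @size: number of bytes to allocate
 * @flags: GFP flags for the allocation
 *
 * Allocates @size bytes of coherent DMA memory with dma_alloc_coherent()
 * and stores the kernel virtual address and the bus address in @mem.
 *
 * Returns 0 if successful, -ENOMEM on allocation failure.
 */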
static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	mem->vaddr = dma_alloc_coherent(dev, mem->size,
					&mem->dma_handle, flags);

	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %lu failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%lu)\n", mem->vaddr, mem->size);

	return 0;
}

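/**
 * __videobuf_dc_free() - release a buffer obtained from __videobuf_dc_alloc()
 * @dev: device the memory was allocated for
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * Frees the coherent DMA memory and clears the kernel virtual address so
 * the buffer is no longer considered allocated.
 */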
static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	mem->vaddr = NULL;
}

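/**
 * videobuf_vm_open() - VMA open handler
 * @vma: user space mapping of the buffer
 *
 * Called whenever a new reference to the user space mapping is created;
 * it is also called directly by __videobuf_mmap_mapper() to take the
 * initial reference.  Simply bumps the mapping reference count.
 */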
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

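/**
 * videobuf_vm_close() - VMA close handler
 * @vma: user space mapping of the buffer
 *
 * Drops one reference on the videobuf mapping.  When the last reference
 * is gone, streaming is cancelled and every kernel-allocated buffer that
 * belongs to this mapping is freed and unlinked from the queue.
 */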
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need to cancel streams first, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if the kernel
				   allocated the memory and it was mmapped.
				   In that case the memory must be freed here,
				   so that the mapping is actually released.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* dma_free_coherent() may sleep, so it must
				   not be called with IRQs disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the information (DMA address and size) derived
 * from the user space buffer pointer.
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	unsigned long untagged_baddr = untagged_addr(vb->baddr);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = untagged_baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, untagged_baddr);
	if (!vma)
		goto out_up;

	if ((untagged_baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = untagged_baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	up_read(&mm->mmap_sem);

	return ret;
}

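/**
 * __videobuf_alloc() - allocate a videobuf buffer with backend private data
 * @size: size of the driver's buffer structure, which typically embeds
 *	struct videobuf_buffer
 *
 * The videobuf-dma-contig private data is placed directly behind the
 * driver structure in the same allocation and marked with MAGIC_DC_MEM.
 */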
static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		vb->priv = ((char *)vb) + size;
		mem = vb->priv;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

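/**
 * __videobuf_to_vaddr() - kernel virtual address of a buffer
 * @buf: video buffer
 *
 * Returns the kernel virtual address of the coherent DMA memory backing
 * @buf, or NULL if nothing has been allocated yet.
 */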
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

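/**
 * __videobuf_iolock() - prepare a buffer for I/O
 * @q: videobuf queue the buffer belongs to
 * @vb: video buffer to prepare
 * @fbuf: unused by this backend (overlay is not supported)
 *
 * For MMAP buffers the memory must already have been allocated by
 * __videobuf_mmap_mapper().  For USERPTR buffers the user space memory is
 * validated with videobuf_dma_contig_user_get(), or, when no user pointer
 * is given (read() method), a kernel buffer is allocated here.
 */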
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}

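/**
 * __videobuf_mmap_mapper() - map a buffer into user space
 * @q: videobuf queue the buffer belongs to
 * @buf: video buffer to map
 * @vma: user space area to map the buffer into
 *
 * Allocates the coherent DMA memory for the buffer and maps it non-cached
 * into the calling process with vm_iomap_memory().
 */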
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
				GFP_KERNEL | __GFP_COMP))
		goto error;

	/* Try to remap memory */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* In v4l2, "vm_pgoff" is only used to look up the buffer data
	 * structure that was allocated earlier; it is not the usual offset
	 * from the start of the physical buffer.  Set it to 0 so the
	 * sanity check in vm_iomap_memory() passes.
	 */
	vma->vm_pgoff = 0;

	retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d\n",
			retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize, vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}

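/* The videobuf backend operations implemented by this helper module */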
static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

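/*
 * videobuf_queue_dma_contig_init() is a thin wrapper around
 * videobuf_queue_core_init() that plugs in the dma-contig operations above.
 *
 * Typical driver usage (a sketch, not taken from this file; "mydev",
 * "mydev_video_qops", the spinlock "mydev->lock" and the queue field
 * "mydev->vb_vidq" are hypothetical names):
 *
 *	videobuf_queue_dma_contig_init(&mydev->vb_vidq, &mydev_video_qops,
 *				       mydev->dev, &mydev->lock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_NONE,
 *				       sizeof(struct videobuf_buffer),
 *				       mydev, NULL);
 *
 * Later, once a buffer has been prepared and queued, the bus address for
 * programming the DMA engine is obtained with videobuf_to_dma_contig().
 */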
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

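/**
 * videobuf_to_dma_contig() - bus address of a buffer
 * @buf: video buffer
 *
 * Returns the DMA (bus) address of the physically contiguous memory
 * backing @buf, suitable for handing directly to the capture hardware.
 */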
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

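/**
 * videobuf_dma_contig_free() - release the memory associated with a buffer
 * @q: videobuf queue the buffer belongs to
 * @buf: video buffer to release
 *
 * Only USERPTR buffers are handled here: either the remembered user space
 * address is forgotten, or the kernel memory allocated for the read()
 * method is freed.  MMAP memory is freed in videobuf_vm_close() instead.
 */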
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped region
	   would be released while it is still needed.  In that case the
	   memory release has to happen inside videobuf_vm_close().
	   So free memory here only if it was allocated for the read()
	   method.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr) {
		__videobuf_dc_free(q->dev, mem);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");