/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
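
/*
 * For example, if the segments of a table map to the DMA ranges
 * [0x1000, 0x2000) and [0x2000, 0x3000), the helper above returns
 * 0x2000. A gap before a segment stops the accumulation, so only the
 * leading contiguous run is counted.
 */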

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
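
/*
 * prepare() and finish() above pair up around a hardware operation:
 * prepare() hands cache ownership to the device before DMA, finish()
 * hands it back to the CPU afterwards. Only USERPTR buffers, which
 * carry a dma_sgt and no attachment, need this: MMAP buffers use the
 * coherent DMA API and DMABUF imports rely on the exporter, hence the
 * early returns.
 */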

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* map the attachment to the client with the new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop the reference obtained in vb2_dc_get_dmabuf() */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
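
/*
 * The exporter keeps a private scatterlist copy per attachment
 * (struct vb2_dc_attachment), so several importers can map the same
 * buffer concurrently, each with its own DMA direction. A mapping is
 * cached in the attachment and redone only when the requested
 * direction changes; this is also why vb2_dc_dmabuf_ops_unmap() is a
 * no-op and the mapping is finally released on detach.
 */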

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to the CPU here: the finish() memop will
		 * already have been called before this, so the buffer is
		 * synced to the CPU.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
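
/*
 * Map a userspace buffer for DMA. Two paths are possible:
 *
 * 1) The memory is backed by struct pages: build a scatterlist from
 *    the page vector, map it, and verify that the resulting DMA range
 *    is contiguous and large enough.
 *
 * 2) The memory has no struct pages (e.g. a reserved carveout exposed
 *    as raw PFNs): verify that the PFNs are consecutive and map the
 *    block directly with dma_map_resource().
 */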

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check whether the memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu bytes\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
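
/*
 * A driver selects this allocator by pointing its vb2_queue at these
 * memops before calling vb2_queue_init(). A minimal, illustrative
 * sketch (the queue and platform device names are hypothetical):
 *
 *	q->dev = &pdev->dev;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 */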

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left at the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
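
/*
 * Illustrative use from a driver's probe(), before the vb2 queue is
 * initialized (error handling trimmed, names hypothetical):
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 *
 * The matching cleanup, vb2_dma_contig_clear_max_seg_size() below,
 * belongs in the driver's remove() callback.
 */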

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");