/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
	bool				non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
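
/*
 * Worked example (illustrative values only): for a mapped table whose DMA
 * segments are {addr 0x10000, len 0x1000} and {addr 0x11000, len 0x1000},
 * the function above returns 0x2000. If the second segment started at
 * 0x12000 instead, the walk would stop early and only 0x1000 would count.
 */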

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when requested buffer size is larger than totalram_pages().
 *   Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}
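
/*
 * Callers must treat a NULL return as a (possibly expected) failure, e.g.
 * when DMA_ATTR_NO_KERNEL_MAPPING is set on the queue. A minimal sketch
 * of the usual pattern through the vb2 core helper that lands here:
 *
 *	void *vaddr = vb2_plane_vaddr(vb, 0);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 */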

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}
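
/*
 * Which of the two allocators above runs is decided per queue: a driver
 * opts in by setting q->allow_cache_hints and userspace then requests
 * non-coherent memory with V4L2_MEMORY_FLAG_NON_COHERENT. The exact
 * negotiation is handled by the vb2 core, not by this allocator.
 */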

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);
	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		put_device(buf->dev);	/* drop the reference taken above */
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior to or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * Map to the client with the new direction; no cache sync is
	 * required, see the comment in vb2_dc_dmabuf_ops_detach().
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
					       buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = true;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
						  buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
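
/*
 * A driver selects this allocator by pointing its vb2 queue at the ops
 * table above before the queue is initialized. A minimal sketch (the
 * surrounding probe code and the "pdev" device are assumptions, not taken
 * from an in-tree driver):
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;
 *	ret = vb2_queue_init(q);
 */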

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). It should be called before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
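
/*
 * Example call (a minimal sketch; the platform device "pdev" and the
 * 32-bit segment size are assumptions, not taken from an in-tree driver).
 * A probe routine on an IOMMU-backed platform would typically do this
 * before vb2_queue_init():
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */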

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);