/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

/* Module-level debug verbosity; dprintk() messages print when debug >= level. */
static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

/*
 * Per-buffer state for the dma-sg allocator.  One instance backs each vb2
 * plane, regardless of which memory model (MMAP, USERPTR or DMABUF) it uses.
 */
struct vb2_dma_sg_buf {
	struct device			*dev;	/* device the buffer is mapped for; ref held via get_device() for MMAP buffers */
	void				*vaddr;	/* kernel mapping, created lazily by vb2_dma_sg_vaddr() */
	struct page			**pages;	/* page array backing the buffer */
	struct frame_vector		*vec;	/* pinned user pages (USERPTR model only) */
	int				offset;	/* offset of the user pointer within its first page */
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;	/* shared with mmap() vmas through handler */
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;	/* non-NULL only for the DMABUF model */
};

static void vb2_dma_sg_put(void *buf_priv);

/*
 * Fill buf->pages with buf->size bytes worth of pages, preferring large
 * physically-contiguous chunks: start at the highest order covering the
 * remaining size and fall back to smaller orders on allocation failure.
 * Already-allocated pages are released on -ENOMEM.
 */
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				/* Even a single page failed: undo and bail. */
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		/* Turn the high-order allocation into individually freeable pages. */
		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

/*
 * MMAP model: allocate a buffer of @size bytes (already page aligned by the
 * caller), build a scatterlist over its pages and DMA-map it for @dev.
 * Returns the buffer cookie or an ERR_PTR.  The device reference is held
 * until vb2_dma_sg_put() drops the last refcount.
 */
static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size, enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer. That means that
	 * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
	 */
	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto fail_map;

	/* Hook the refcount up so mmap'ed vmas keep the buffer alive. */
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * Drop one reference to an MMAP-model buffer; on the last put, DMA-unmap it,
 * tear down the kernel mapping and scatterlist, free all pages, and release
 * the device reference taken in vb2_dma_sg_alloc().
 */
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		/* Skip the CPU sync: the buffer is going away, not being read. */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

/* Sync the buffer for device access before DMA starts (vb2 prepare memop). */
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

/* Sync the buffer back for CPU access after DMA completes (vb2 finish memop). */
static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*
 * USERPTR model: pin the user pages at @vaddr/@size, build a scatterlist
 * over them (honouring the sub-page offset of the user pointer) and DMA-map
 * it for @dev.  Returns the buffer cookie or ERR_PTR(-ENOMEM)/(-EINVAL).
 * Note that all failures are collapsed to -ENOMEM, including framevec errors.
 */
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	/* frame_vector_pages() fails if the range maps raw PFNs, not pages. */
	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used.  Unmaps the DMA mapping, marks pages dirty when the device may
 * have written to them, and unpins the user pages.
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	/* The device may have written into the pages; tell the VM about it. */
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * Return a kernel virtual address for the buffer, creating the mapping on
 * first use: dma_buf_vmap() for DMABUF buffers, vm_map_ram() otherwise.
 * The sub-page offset is added so USERPTR buffers map to the user's start.
 * Returns NULL if the mapping could not be created.
 */
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

/* Number of users of the buffer: the allocator's own ref plus mmap/dmabuf refs. */
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

/*
 * Map the buffer's pages into a userspace vma and install the common vb2
 * vm_ops so the vma open/close callbacks track the buffer refcount.
 */
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	/* Take the reference for this vma explicitly. */
	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

/* Per-attachment copy of the exporter's scatterlist plus its mapped direction. */
struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;	/* DMA_NONE while unmapped */
};

/*
 * dma_buf attach: duplicate the buffer's scatterlist into the attachment so
 * each importer can map it independently.  The copy is left unmapped
 * (dma_dir == DMA_NONE) until the importer calls map_dma_buf.
 */
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

/*
 * dma_buf detach: unmap the attachment's scatterlist if still mapped, then
 * free the scatterlist copy and the attachment state.
 */
static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

/*
 * dma_buf map: DMA-map the attachment's scatterlist for the importer's
 * device.  A previous mapping with the same direction is returned as-is;
 * one with a different direction is unmapped first.  The dmabuf lock
 * serializes concurrent map/unmap calls.
 */
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

/*
 * dma_buf unmap: intentionally empty; the mapping is cached in the
 * attachment and torn down in detach (or remapped on direction change).
 */
static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

/* dma_buf release: drop the vb2 buffer reference held by the exported dmabuf. */
static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

/* Sync for CPU access on behalf of a dmabuf importer. */
static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

/* Hand the buffer back to the device after importer CPU access. */
static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

/*
 * dma_buf vmap: expose the exporter's kernel mapping to the importer.
 * NOTE(review): this publishes buf->vaddr as-is; it appears to rely on the
 * mapping having been created beforehand via vb2_dma_sg_vaddr() — confirm
 * callers guarantee that, since a NULL vaddr is not rejected here.
 */
static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

/* dma_buf mmap: reuse the regular vb2 mmap path for exported buffers. */
static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

/* Exporter-side dma_buf callbacks for buffers shared out of this allocator. */
static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

/*
 * Export an MMAP-model buffer as a dma_buf.  On success the dmabuf holds
 * one reference to the vb2 buffer (dropped in the release callback).
 * Returns NULL on failure, as the vb2 core expects.
 */
static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

/*
 * DMABUF model: pin the imported buffer by mapping the attachment.  The
 * exporter's scatterlist becomes buf->dma_sgt for the duration of the
 * mapping.  Returns 0 on success (including the already-pinned WARN case)
 * or -EINVAL.
 */
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

/*
 * DMABUF model: unpin the imported buffer.  Any kernel mapping is dropped
 * first, then the attachment is unmapped and buf->dma_sgt cleared so a
 * later map_dmabuf starts fresh.
 */
static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

/*
 * DMABUF model: detach from the dmabuf and free the buffer state.  A still
 * mapped buffer indicates a vb2 core bug; it is unmapped defensively first.
 */
static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

/*
 * DMABUF model: attach @dev to an imported dma_buf of at least @size bytes.
 * No mapping is done yet; that happens in map_dmabuf.  Returns the buffer
 * cookie or an ERR_PTR.
 */
static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/* Allocator cookie: the scatterlist, so drivers can program their DMA engines. */
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

/* vb2 memory-ops vtable tying all of the above into the videobuf2 core. */
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");