1 /* 2 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2 3 * 4 * Copyright (C) 2010 Samsung Electronics 5 * 6 * Author: Pawel Osciak <pawel@osciak.com> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation. 11 */ 12 13 #include <linux/io.h> 14 #include <linux/module.h> 15 #include <linux/mm.h> 16 #include <linux/refcount.h> 17 #include <linux/sched.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 21 #include <media/videobuf2-v4l2.h> 22 #include <media/videobuf2-vmalloc.h> 23 #include <media/videobuf2-memops.h> 24 25 struct vb2_vmalloc_buf { 26 void *vaddr; 27 struct frame_vector *vec; 28 enum dma_data_direction dma_dir; 29 unsigned long size; 30 refcount_t refcount; 31 struct vb2_vmarea_handler handler; 32 struct dma_buf *dbuf; 33 }; 34 35 static void vb2_vmalloc_put(void *buf_priv); 36 37 static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev, 38 unsigned long size) 39 { 40 struct vb2_vmalloc_buf *buf; 41 42 buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags); 43 if (!buf) 44 return ERR_PTR(-ENOMEM); 45 46 buf->size = size; 47 buf->vaddr = vmalloc_user(buf->size); 48 if (!buf->vaddr) { 49 pr_debug("vmalloc of size %ld failed\n", buf->size); 50 kfree(buf); 51 return ERR_PTR(-ENOMEM); 52 } 53 54 buf->dma_dir = vb->vb2_queue->dma_dir; 55 buf->handler.refcount = &buf->refcount; 56 buf->handler.put = vb2_vmalloc_put; 57 buf->handler.arg = buf; 58 59 refcount_set(&buf->refcount, 1); 60 return buf; 61 } 62 63 static void vb2_vmalloc_put(void *buf_priv) 64 { 65 struct vb2_vmalloc_buf *buf = buf_priv; 66 67 if (refcount_dec_and_test(&buf->refcount)) { 68 vfree(buf->vaddr); 69 kfree(buf); 70 } 71 } 72 73 static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev, 74 unsigned long vaddr, unsigned long size) 75 { 76 struct vb2_vmalloc_buf *buf; 77 struct frame_vector *vec; 78 int n_pages, offset, i; 79 int ret = -ENOMEM; 80 81 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 82 if (!buf) 83 return ERR_PTR(-ENOMEM); 84 85 buf->dma_dir = vb->vb2_queue->dma_dir; 86 offset = vaddr & ~PAGE_MASK; 87 buf->size = size; 88 vec = vb2_create_framevec(vaddr, size); 89 if (IS_ERR(vec)) { 90 ret = PTR_ERR(vec); 91 goto fail_pfnvec_create; 92 } 93 buf->vec = vec; 94 n_pages = frame_vector_count(vec); 95 if (frame_vector_to_pages(vec) < 0) { 96 unsigned long *nums = frame_vector_pfns(vec); 97 98 /* 99 * We cannot get page pointers for these pfns. Check memory is 100 * physically contiguous and use direct mapping. 101 */ 102 for (i = 1; i < n_pages; i++) 103 if (nums[i-1] + 1 != nums[i]) 104 goto fail_map; 105 buf->vaddr = (__force void *) 106 ioremap(__pfn_to_phys(nums[0]), size + offset); 107 } else { 108 buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1); 109 } 110 111 if (!buf->vaddr) 112 goto fail_map; 113 buf->vaddr += offset; 114 return buf; 115 116 fail_map: 117 vb2_destroy_framevec(vec); 118 fail_pfnvec_create: 119 kfree(buf); 120 121 return ERR_PTR(ret); 122 } 123 124 static void vb2_vmalloc_put_userptr(void *buf_priv) 125 { 126 struct vb2_vmalloc_buf *buf = buf_priv; 127 unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; 128 unsigned int i; 129 struct page **pages; 130 unsigned int n_pages; 131 132 if (!buf->vec->is_pfns) { 133 n_pages = frame_vector_count(buf->vec); 134 pages = frame_vector_pages(buf->vec); 135 if (vaddr) 136 vm_unmap_ram((void *)vaddr, n_pages); 137 if (buf->dma_dir == DMA_FROM_DEVICE || 138 buf->dma_dir == DMA_BIDIRECTIONAL) 139 for (i = 0; i < n_pages; i++) 140 set_page_dirty_lock(pages[i]); 141 } else { 142 iounmap((__force void __iomem *)buf->vaddr); 143 } 144 vb2_destroy_framevec(buf->vec); 145 kfree(buf); 146 } 147 148 static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv) 149 { 150 struct vb2_vmalloc_buf *buf = buf_priv; 151 152 if (!buf->vaddr) { 153 pr_err("Address of an unallocated plane requested or cannot map user pointer\n"); 154 return NULL; 155 } 156 157 return buf->vaddr; 158 } 159 160 static unsigned int vb2_vmalloc_num_users(void *buf_priv) 161 { 162 struct vb2_vmalloc_buf *buf = buf_priv; 163 return refcount_read(&buf->refcount); 164 } 165 166 static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) 167 { 168 struct vb2_vmalloc_buf *buf = buf_priv; 169 int ret; 170 171 if (!buf) { 172 pr_err("No memory to map\n"); 173 return -EINVAL; 174 } 175 176 ret = remap_vmalloc_range(vma, buf->vaddr, 0); 177 if (ret) { 178 pr_err("Remapping vmalloc memory, error: %d\n", ret); 179 return ret; 180 } 181 182 /* 183 * Make sure that vm_areas for 2 buffers won't be merged together 184 */ 185 vma->vm_flags |= VM_DONTEXPAND; 186 187 /* 188 * Use common vm_area operations to track buffer refcount. 189 */ 190 vma->vm_private_data = &buf->handler; 191 vma->vm_ops = &vb2_common_vm_ops; 192 193 vma->vm_ops->open(vma); 194 195 return 0; 196 } 197 198 #ifdef CONFIG_HAS_DMA 199 /*********************************************/ 200 /* DMABUF ops for exporters */ 201 /*********************************************/ 202 203 struct vb2_vmalloc_attachment { 204 struct sg_table sgt; 205 enum dma_data_direction dma_dir; 206 }; 207 208 static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, 209 struct dma_buf_attachment *dbuf_attach) 210 { 211 struct vb2_vmalloc_attachment *attach; 212 struct vb2_vmalloc_buf *buf = dbuf->priv; 213 int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE; 214 struct sg_table *sgt; 215 struct scatterlist *sg; 216 void *vaddr = buf->vaddr; 217 int ret; 218 int i; 219 220 attach = kzalloc(sizeof(*attach), GFP_KERNEL); 221 if (!attach) 222 return -ENOMEM; 223 224 sgt = &attach->sgt; 225 ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL); 226 if (ret) { 227 kfree(attach); 228 return ret; 229 } 230 for_each_sgtable_sg(sgt, sg, i) { 231 struct page *page = vmalloc_to_page(vaddr); 232 233 if (!page) { 234 sg_free_table(sgt); 235 kfree(attach); 236 return -ENOMEM; 237 } 238 sg_set_page(sg, page, PAGE_SIZE, 0); 239 vaddr += PAGE_SIZE; 240 } 241 242 attach->dma_dir = DMA_NONE; 243 dbuf_attach->priv = attach; 244 return 0; 245 } 246 247 static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf, 248 struct dma_buf_attachment *db_attach) 249 { 250 struct vb2_vmalloc_attachment *attach = db_attach->priv; 251 struct sg_table *sgt; 252 253 if (!attach) 254 return; 255 256 sgt = &attach->sgt; 257 258 /* release the scatterlist cache */ 259 if (attach->dma_dir != DMA_NONE) 260 dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); 261 sg_free_table(sgt); 262 kfree(attach); 263 db_attach->priv = NULL; 264 } 265 266 static struct sg_table *vb2_vmalloc_dmabuf_ops_map( 267 struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) 268 { 269 struct vb2_vmalloc_attachment *attach = db_attach->priv; 270 struct sg_table *sgt; 271 272 sgt = &attach->sgt; 273 /* return previously mapped sg table */ 274 if (attach->dma_dir == dma_dir) 275 return sgt; 276 277 /* release any previous cache */ 278 if (attach->dma_dir != DMA_NONE) { 279 dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); 280 attach->dma_dir = DMA_NONE; 281 } 282 283 /* mapping to the client with new direction */ 284 if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) { 285 pr_err("failed to map scatterlist\n"); 286 return ERR_PTR(-EIO); 287 } 288 289 attach->dma_dir = dma_dir; 290 291 return sgt; 292 } 293 294 static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, 295 struct sg_table *sgt, enum dma_data_direction dma_dir) 296 { 297 /* nothing to be done here */ 298 } 299 300 static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf) 301 { 302 /* drop reference obtained in vb2_vmalloc_get_dmabuf */ 303 vb2_vmalloc_put(dbuf->priv); 304 } 305 306 static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, 307 struct iosys_map *map) 308 { 309 struct vb2_vmalloc_buf *buf = dbuf->priv; 310 311 iosys_map_set_vaddr(map, buf->vaddr); 312 313 return 0; 314 } 315 316 static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, 317 struct vm_area_struct *vma) 318 { 319 return vb2_vmalloc_mmap(dbuf->priv, vma); 320 } 321 322 static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { 323 .attach = vb2_vmalloc_dmabuf_ops_attach, 324 .detach = vb2_vmalloc_dmabuf_ops_detach, 325 .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, 326 .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, 327 .vmap = vb2_vmalloc_dmabuf_ops_vmap, 328 .mmap = vb2_vmalloc_dmabuf_ops_mmap, 329 .release = vb2_vmalloc_dmabuf_ops_release, 330 }; 331 332 static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb, 333 void *buf_priv, 334 unsigned long flags) 335 { 336 struct vb2_vmalloc_buf *buf = buf_priv; 337 struct dma_buf *dbuf; 338 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 339 340 exp_info.ops = &vb2_vmalloc_dmabuf_ops; 341 exp_info.size = buf->size; 342 exp_info.flags = flags; 343 exp_info.priv = buf; 344 345 if (WARN_ON(!buf->vaddr)) 346 return NULL; 347 348 dbuf = dma_buf_export(&exp_info); 349 if (IS_ERR(dbuf)) 350 return NULL; 351 352 /* dmabuf keeps reference to vb2 buffer */ 353 refcount_inc(&buf->refcount); 354 355 return dbuf; 356 } 357 #endif /* CONFIG_HAS_DMA */ 358 359 360 /*********************************************/ 361 /* callbacks for DMABUF buffers */ 362 /*********************************************/ 363 364 static int vb2_vmalloc_map_dmabuf(void *mem_priv) 365 { 366 struct vb2_vmalloc_buf *buf = mem_priv; 367 struct iosys_map map; 368 int ret; 369 370 ret = dma_buf_vmap_unlocked(buf->dbuf, &map); 371 if (ret) 372 return -EFAULT; 373 buf->vaddr = map.vaddr; 374 375 return 0; 376 } 377 378 static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) 379 { 380 struct vb2_vmalloc_buf *buf = mem_priv; 381 struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); 382 383 dma_buf_vunmap_unlocked(buf->dbuf, &map); 384 buf->vaddr = NULL; 385 } 386 387 static void vb2_vmalloc_detach_dmabuf(void *mem_priv) 388 { 389 struct vb2_vmalloc_buf *buf = mem_priv; 390 struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); 391 392 if (buf->vaddr) 393 dma_buf_vunmap_unlocked(buf->dbuf, &map); 394 395 kfree(buf); 396 } 397 398 static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb, 399 struct device *dev, 400 struct dma_buf *dbuf, 401 unsigned long size) 402 { 403 struct vb2_vmalloc_buf *buf; 404 405 if (dbuf->size < size) 406 return ERR_PTR(-EFAULT); 407 408 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 409 if (!buf) 410 return ERR_PTR(-ENOMEM); 411 412 buf->dbuf = dbuf; 413 buf->dma_dir = vb->vb2_queue->dma_dir; 414 buf->size = size; 415 416 return buf; 417 } 418 419 420 const struct vb2_mem_ops vb2_vmalloc_memops = { 421 .alloc = vb2_vmalloc_alloc, 422 .put = vb2_vmalloc_put, 423 .get_userptr = vb2_vmalloc_get_userptr, 424 .put_userptr = vb2_vmalloc_put_userptr, 425 #ifdef CONFIG_HAS_DMA 426 .get_dmabuf = vb2_vmalloc_get_dmabuf, 427 #endif 428 .map_dmabuf = vb2_vmalloc_map_dmabuf, 429 .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf, 430 .attach_dmabuf = vb2_vmalloc_attach_dmabuf, 431 .detach_dmabuf = vb2_vmalloc_detach_dmabuf, 432 .vaddr = vb2_vmalloc_vaddr, 433 .mmap = vb2_vmalloc_mmap, 434 .num_users = vb2_vmalloc_num_users, 435 }; 436 EXPORT_SYMBOL_GPL(vb2_vmalloc_memops); 437 438 MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2"); 439 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>"); 440 MODULE_LICENSE("GPL"); 441 MODULE_IMPORT_NS(DMA_BUF); 442