/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size,
			       enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
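		 * This is the case for VM_IO/VM_PFNMAP userspace mappings
		 * (e.g. a framebuffer) that have no struct page behind them;
		 * the only way to get a kernel mapping then is to ioremap()
		 * the pfn range, which requires it to be contiguous.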
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return refcount_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
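	 * The handler's open()/close() callbacks take and drop a reference
	 * on the buffer, so it is not freed while a mapping still exists.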
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct dma_buf_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

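/*
 * mmap of the exported dmabuf reuses vb2_vmalloc_mmap(): an importer that
 * mmaps the dmabuf ends up with the same vmalloc pages a direct V4L2
 * mmap() of the buffer would give it.
 */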
static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				       struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, &map);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size,
				       enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
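
/*
 * Usage sketch: a driver selects this allocator by pointing its vb2_queue
 * at these ops before calling vb2_queue_init(). The "my_*" names below are
 * placeholders for the driver's own structures, not symbols from this file:
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_queue_ops;
 *	ret = vb2_queue_init(q);
 */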