/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);

static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
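
/*
 * Worked example (illustrative, not part of the original file), assuming
 * 4 KiB pages: a 150-page buffer is compacted greedily, taking the largest
 * power-of-two chunk that still fits at every step; get_order() rounds up,
 * so the order is decremented whenever it would over-allocate:
 *
 *	150 pages -> order 7 (128 pages), 22 pages left
 *	 22 pages -> order 4 ( 16 pages),  6 pages left
 *	  6 pages -> order 2 (  4 pages),  2 pages left
 *	  2 pages -> order 1 (  2 pages),  0 pages left
 *
 * When alloc_pages() fails for an order, the inner loop retries one order
 * lower, trading physical contiguity for reliability; split_page() then
 * breaks each chunk into order-0 pages so they can be freed one by one.
 */
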
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
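
/*
 * Note (illustrative, assuming the usual vb2 call flow): prepare() is
 * invoked when a buffer is queued and hands cache ownership to the device;
 * finish() is invoked when it is dequeued and hands ownership back to the
 * CPU. Since the mapping above is created with DMA_ATTR_SKIP_CPU_SYNC,
 * these memops are the only place CPU caches are synced; a queue that sets
 * skip_cache_sync_on_prepare/finish must issue the equivalent calls itself:
 *
 *	dma_sync_sgtable_for_device(dev, sgt, dir);	// CPU -> device handoff
 *	... device DMA ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, dir);	// device -> CPU handoff
 */
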
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
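
/*
 * User-space counterpart (illustrative sketch, not part of this file): an
 * MMAP buffer backed by this allocator is mapped with the standard V4L2
 * sequence; the page mapping itself comes from vm_map_pages() above, and
 * the refcount is held by vb2_common_vm_ops until munmap():
 *
 *	struct v4l2_buffer b = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index = 0,
 *	};
 *	ioctl(fd, VIDIOC_QUERYBUF, &b);
 *	void *p = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, b.m.offset);
 */
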
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct dma_buf_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}
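
/*
 * User-space counterpart (illustrative, not part of this file): the
 * begin/end_cpu_access hooks above back DMA_BUF_IOCTL_SYNC, with which an
 * importer brackets any CPU access to the exported buffer:
 *
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	... CPU reads/writes the buffer ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
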
static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
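
/*
 * Lifecycle sketch (an assumption about the vb2 core call order, shown
 * here only for orientation): for a DMABUF buffer the callbacks above are
 * driven roughly as follows:
 *
 *	buf = vb2_dma_sg_attach_dmabuf(vb, dev, dbuf, size);
 *	vb2_dma_sg_map_dmabuf(buf);	// pin; buf->dma_sgt = importer sgt
 *	... driver DMA using buf->dma_sgt ...
 *	vb2_dma_sg_unmap_dmabuf(buf);	// unpin; buf->dma_sgt = NULL
 *	vb2_dma_sg_detach_dmabuf(buf);	// detach and free
 */
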
static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);
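
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * selects this allocator by setting q->mem_ops, then retrieves the
 * sg_table through the cookie (wrapped by vb2_dma_sg_plane_desc()) to
 * program its DMA engine; write_hw_desc() is a made-up driver helper:
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *
 *	// in the driver's buf_queue() callback:
 *	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		write_hw_desc(dev, sg_dma_address(sg), sg_dma_len(sg));
 */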