/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);

static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
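
/*
 * Worked example for the allocator above (illustrative, assuming 4 KiB
 * pages): a 24-page (96 KiB) request first computes order 5 (128 KiB),
 * which is trimmed to order 4 (64 KiB, 16 pages) to avoid
 * over-allocation; the remaining 32 KiB is then satisfied by one
 * order-3 chunk (8 pages). Two physically contiguous runs instead of
 * 24 scattered pages let sg_alloc_table_from_pages() below coalesce
 * the buffer into as few as two scatterlist entries. Under memory
 * pressure the inner loop falls back to ever smaller orders, down to
 * single pages.
 */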

static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
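
/*
 * A note on the prepare()/finish() pair above: pages obtained from the
 * page allocator are not DMA-coherent, so buffer ownership is handed
 * over explicitly. Roughly, the vb2 core invokes .prepare when a buffer
 * is queued to the driver (CPU -> device sync) and .finish before it is
 * handed back toward userspace (device -> CPU sync); the
 * skip_cache_sync_on_* flags let a queue opt out when it knows no CPU
 * access takes place.
 */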

static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct iosys_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory failed, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
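
/*
 * For reference, a USERPTR buffer reaches vb2_dma_sg_get_userptr() when
 * userspace queues its own memory; a minimal (hypothetical) snippet:
 *
 *	struct v4l2_buffer b = {
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_USERPTR,
 *		.index  = 0,
 *	};
 *	b.m.userptr = (unsigned long)user_buf;
 *	b.length    = buf_len;
 *	ioctl(fd, VIDIOC_QBUF, &b);
 *
 * The pointer need not be page-aligned: the sub-page offset is kept in
 * buf->offset and folded into the first scatterlist entry.
 */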

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* map the scatterlist for the client with the new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct iosys_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};
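
/*
 * Each attachment above carries its own copy of the scatterlist because
 * dma_map_sgtable() writes per-device DMA addresses into the sg entries;
 * two importers mapping one shared table would clobber each other.
 * A (hypothetical) importer drives these ops through the dma-buf API:
 *
 *	attach = dma_buf_attach(dbuf, &importer_pdev->dev);
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_FROM_DEVICE);
 *	...
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_FROM_DEVICE);
 *	dma_buf_detach(dbuf, attach);
 */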

static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}
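
/*
 * The cookie is the buffer's sg_table. Drivers normally reach it through
 * the vb2_dma_sg_plane_desc() helper from <media/videobuf2-dma-sg.h>;
 * a minimal sketch of typical driver code:
 *
 *	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *
 * where program_hw_descriptor() stands in for the device-specific DMA
 * descriptor setup.
 */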

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);
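
/*
 * A driver opts into this allocator by pointing its vb2 queue at the ops
 * table above before calling vb2_queue_init(); a minimal sketch, with
 * my_dev standing in for the driver's device state:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->dev = &my_dev->pdev->dev;
 *	ret = vb2_queue_init(q);
 */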