// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
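/*
 * Example: a minimal, hedged sketch of how a driver might use
 * drm_gem_shmem_create() to allocate a buffer object. "foo_bo_create" is
 * a hypothetical driver function, not part of this library.
 */
static inline struct drm_gem_shmem_object *
foo_bo_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	/*
	 * size is PAGE_ALIGN()ed internally; the backing pages are
	 * allocated lazily on first use, not here.
	 */
	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/* the caller now holds the single GEM object reference */
	return shmem;
}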
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
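/*
 * Example: a hedged sketch of the get/put refcounting contract. A driver
 * that needs the backing struct pages takes a use-count reference, works
 * with the shmem->pages array, and drops the reference again. The helper
 * name "foo_with_pages" and its callback are hypothetical.
 */
static inline int foo_with_pages(struct drm_gem_shmem_object *shmem,
				 void (*cb)(struct page **pages,
					    unsigned int npages))
{
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	/* shmem->pages stays valid as long as the use count is held */
	cb(shmem->pages, shmem->base.size >> PAGE_SHIFT);

	drm_gem_shmem_put_pages(shmem);

	return 0;
}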
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
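/*
 * Example: a hedged sketch of CPU access through a vmap'ed object. Shmem
 * buffers are always system memory, so map.vaddr can be dereferenced
 * directly once the helper has ruled out is_iomem. "foo_clear_bo" is a
 * hypothetical driver function, not part of this library.
 */
static inline int foo_clear_bo(struct drm_gem_shmem_object *shmem)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_shmem_vmap(shmem, &map);
	if (ret)
		return ret;

	/* contiguous kernel mapping covering the whole buffer */
	memset(map.vaddr, 0, shmem->base.size);

	drm_gem_shmem_vunmap(shmem, &map);

	return 0;
}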
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops
 * to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
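/*
 * Example: a hedged sketch of how a driver shrinker might use the madvise
 * state. Userspace first marks a buffer purgeable through a driver ioctl
 * that ends up in drm_gem_shmem_madvise(); under memory pressure the
 * driver then tries to reclaim it. "foo_try_purge" is hypothetical.
 */
static inline bool foo_try_purge(struct drm_gem_shmem_object *shmem)
{
	if (!drm_gem_shmem_is_purgeable(shmem))
		return false;

	/*
	 * drm_gem_shmem_purge() trylocks pages_lock and returns false on
	 * contention, which a shrinker must tolerate.
	 */
	return drm_gem_shmem_purge(shmem);
}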
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
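/*
 * Example: a hedged sketch of the pitch/size arithmetic above. For a
 * 1024x768 buffer at 32 bpp, min_pitch = DIV_ROUND_UP(1024 * 32, 8) =
 * 4096 bytes and size = PAGE_ALIGN(4096 * 768) = 3145728 bytes.
 * "foo_dumb_size" is a hypothetical helper, not part of this library.
 */
static inline u32 foo_dumb_size(u32 width, u32 height, u32 bpp)
{
	u32 min_pitch = DIV_ROUND_UP(width * bpp, 8);

	return PAGE_ALIGN(min_pitch * height);
}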
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides any difference between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
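/*
 * Example: a hedged sketch of a driver mapping a buffer for device DMA
 * and walking the resulting table. "foo_bind_bo" and "foo_program_page"
 * are hypothetical; error handling is reduced to the minimum.
 */
static inline int foo_bind_bo(struct drm_gem_shmem_object *shmem,
			      void (*foo_program_page)(dma_addr_t addr,
						       unsigned int len))
{
	struct scatterlist *sgl;
	struct sg_table *sgt;
	unsigned int i;

	/* pins and dma-maps the pages on first call, cached afterwards */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* walk the DMA-mapped entries, not the CPU pages */
	for_each_sgtable_dma_sg(sgt, sgl, i)
		foo_program_page(sg_dma_address(sgl), sg_dma_len(sgl));

	return 0;
}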
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");
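/*
 * Example: a hedged sketch of how a driver built on these helpers wires up
 * its &drm_driver callbacks. "foo_driver" is hypothetical; the
 * DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h> covers
 * these assignments together with the related PRIME file-descriptor hooks.
 */
static const struct drm_driver foo_driver __maybe_unused = {
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.dumb_create		   = drm_gem_shmem_dumb_create,
};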