// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private)
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
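/*
 * Example (illustrative sketch, not part of this file): a driver that wants
 * a shmem-backed buffer for its own use can call drm_gem_shmem_create()
 * directly. The hypothetical helper and its error handling below are an
 * assumption, not an API requirement:
 *
 *	static int mydrv_alloc_buffer(struct drm_device *dev, size_t size,
 *				      struct drm_gem_shmem_object **out)
 *	{
 *		struct drm_gem_shmem_object *shmem;
 *
 *		shmem = drm_gem_shmem_create(dev, size);
 *		if (IS_ERR(shmem))
 *			return PTR_ERR(shmem);
 *
 *		*out = shmem;
 *		return 0;
 *	}
 *
 * The returned object holds one reference; drop it with drm_gem_object_put()
 * when the driver is done with the buffer.
 */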
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
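/*
 * Example (illustrative sketch, not part of this file): backing pages are
 * use counted, so every successful drm_gem_shmem_get_pages() must be
 * balanced by a drm_gem_shmem_put_pages(). A hypothetical driver touching
 * a single page directly might do:
 *
 *	static int mydrv_clear_first_page(struct drm_gem_shmem_object *shmem)
 *	{
 *		void *vaddr;
 *		int ret;
 *
 *		ret = drm_gem_shmem_get_pages(shmem);
 *		if (ret)
 *			return ret;
 *
 *		// shmem->pages[] stays valid until the matching put below.
 *		vaddr = kmap(shmem->pages[0]);
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(shmem->pages[0]);
 *
 *		drm_gem_shmem_put_pages(shmem);
 *		return 0;
 *	}
 *
 * For access to the whole buffer, drm_gem_shmem_vmap() below is usually the
 * better fit.
 */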
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	if (obj->import_attach) {
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
	}

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * A kernel virtual address on success or an ERR_PTR()-encoded negative error
 * code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
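/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver that needs CPU access to a whole buffer can bracket it with
 * drm_gem_shmem_vmap()/drm_gem_shmem_vunmap(), which work for both native
 * and dma-buf imported objects:
 *
 *	static int mydrv_clear_buffer(struct drm_gem_object *obj)
 *	{
 *		void *vaddr = drm_gem_shmem_vmap(obj);
 *
 *		if (IS_ERR(vaddr))
 *			return PTR_ERR(vaddr);
 *
 *		memset(vaddr, 0, obj->size);
 *		drm_gem_shmem_vunmap(obj, vaddr);
 *
 *		return 0;
 *	}
 *
 * Mappings are use counted, so nested vmap/vunmap pairs are cheap; only the
 * first vmap creates the mapping and only the last vunmap removes it.
 */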
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	} else {
		vunmap(shmem->vaddr);
		/* Imported buffers have no locally allocated pages to put. */
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops
 * to zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it
 * can also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

/**
 * drm_gem_shmem_create_with_handle - Allocate an object with the given size
 *                                    and return a GEM handle to it
 * @file_priv: DRM file structure to register the handle for
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the GEM handle
 *
 * This function creates a shmem GEM object and registers a GEM handle for it
 * with @file_priv. The handle holds the only reference kept on the object, so
 * the object is freed once the handle is closed.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Register the object and allocate a handle for it; the handle is
	 * the ID that userspace uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/**
 * drm_gem_shmem_madvise - Update the madvise status of the backing pages
 * @obj: GEM object
 * @madv: New madvise value
 *
 * This function updates the madvise status unless the object has already
 * been purged.
 *
 * Returns:
 * True if the object has not been purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

/**
 * drm_gem_shmem_purge_locked - Free the backing storage of a purgeable object
 * @obj: GEM object
 *
 * This function releases the backing pages, the scatter/gather table and the
 * mmap offset of an object that userspace has marked purgeable with
 * drm_gem_shmem_madvise(). The caller must hold
 * &drm_gem_shmem_object.pages_lock.
 */
void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

/**
 * drm_gem_shmem_purge - Free the backing storage of a purgeable object
 * @obj: GEM object
 *
 * Like drm_gem_shmem_purge_locked(), but this function takes the pages_lock
 * itself. Instead of blocking on a contended lock it fails, which is what
 * shrinkers want in order to avoid deadlocks under memory pressure.
 *
 * Returns:
 * True if the object was purged, false if the pages_lock could not be taken.
 */
bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
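/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * shrinker scan callback built on the madvise/purge helpers. The driver
 * private structure, its shrinker and object list members are assumptions,
 * and list locking is elided for brevity:
 *
 *	static unsigned long
 *	mydrv_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct mydrv_device *mydev =
 *			container_of(shrinker, struct mydrv_device, shrinker);
 *		struct drm_gem_shmem_object *shmem;
 *		unsigned long freed = 0;
 *
 *		list_for_each_entry(shmem, &mydev->shrink_list, madv_list) {
 *			if (freed >= sc->nr_to_scan)
 *				break;
 *			if (drm_gem_shmem_is_purgeable(shmem) &&
 *			    drm_gem_shmem_purge(&shmem->base))
 *				freed += shmem->base.size >> PAGE_SHIFT;
 *		}
 *
 *		return freed;
 *	}
 *
 * Userspace opts objects in via a driver-specific madvise ioctl that ends up
 * calling drm_gem_shmem_madvise(); &drm_gem_shmem_object.madv_list exists so
 * drivers can keep track of such objects.
 */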
/**
 * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
 *                                      cached mappings
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * By default, shmem buffer objects use writecombine mappings. This
 * function implements &drm_driver.gem_create_object for shmem
 * buffer objects with cached mappings.
 *
 * Returns:
 * A struct drm_gem_object * on success or NULL on failure.
 */
struct drm_gem_object *
drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;
	shmem->map_cached = true;

	return &shmem->base;
}
EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer as the buffer width
 * multiplied by the bits per pixel, rounded up to a whole number of bytes.
 * Drivers for hardware that doesn't have any additional restrictions on the
 * pitch can directly use this function as their &drm_driver.dumb_create
 * callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
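/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver that wants cached instead of writecombine mappings wires the
 * helper into its &drm_driver:
 *
 *	static struct drm_driver mydrv_driver = {
 *		...
 *		.gem_create_object = drm_gem_shmem_create_object_cached,
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *		...
 *	};
 *
 * All objects subsequently allocated through __drm_gem_shmem_create() then
 * have &drm_gem_shmem_object.map_cached set, which both the vmap and mmap
 * paths honor when choosing the page protection.
 */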
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers that employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (!shmem->map_cached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
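/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver programming a DMA engine would use drm_gem_shmem_get_pages_sgt()
 * below rather than drm_gem_shmem_get_sg_table(), and walk the mapped
 * entries with the standard scatterlist iterators:
 *
 *	static int mydrv_program_dma(struct drm_gem_object *obj)
 *	{
 *		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
 *		struct scatterlist *sg;
 *		int i;
 *
 *		if (IS_ERR(sgt))
 *			return PTR_ERR(sgt);
 *
 *		for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *			mydrv_queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 *
 *		return 0;
 *	}
 *
 * mydrv_queue_segment() is a stand-in for whatever the hardware needs; the
 * sg table stays cached in the object until it is freed or purged.
 */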
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides any difference between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w; dma_map_sg() returns 0 on error. */
	if (!dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
		ret = -ENOMEM;
		goto err_free_sgt;
	}

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
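/*
 * Example (illustrative sketch, not part of this file): to adopt the shmem
 * helpers wholesale, a hypothetical driver can use the
 * DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>, which wires up dumb_create and the PRIME
 * import path documented above:
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name = "mydrv",
 *		...
 *	};
 *
 * Everything else (pinning, vmap, mmap, sg table export) is then reached
 * through the &drm_gem_object_funcs set up by __drm_gem_shmem_create().
 */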