// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
        .free = drm_gem_shmem_free_object,
        .print_info = drm_gem_shmem_print_info,
        .pin = drm_gem_shmem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
        .vmap = drm_gem_shmem_vmap,
        .vunmap = drm_gem_shmem_vunmap,
        .mmap = drm_gem_shmem_mmap,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
        struct drm_gem_shmem_object *shmem;
        struct drm_gem_object *obj;
        int ret;

        size = PAGE_ALIGN(size);

        if (dev->driver->gem_create_object)
                obj = dev->driver->gem_create_object(dev, size);
        else
                obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        if (!obj->funcs)
                obj->funcs = &drm_gem_shmem_funcs;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto err_free;

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto err_release;

        shmem = to_drm_gem_shmem_obj(obj);
        mutex_init(&shmem->pages_lock);
        mutex_init(&shmem->vmap_lock);
        INIT_LIST_HEAD(&shmem->madv_list);

        /*
         * Our buffers are kept pinned, so allocating them
         * from the MOVABLE zone is a really bad idea, and
         * conflicts with CMA. See comments above new_inode()
         * why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

        return shmem;

err_release:
        drm_gem_object_release(obj);
err_free:
        kfree(obj);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
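/*
 * Example (illustrative sketch, not part of this library): a driver that
 * embeds &struct drm_gem_shmem_object in its own buffer object can hook
 * &drm_driver.gem_create_object so that drm_gem_shmem_create() above
 * allocates the larger driver structure. The my_* names are hypothetical.
 */
struct my_bo {
        struct drm_gem_shmem_object base;
        bool coherent;                  /* hypothetical driver-private state */
};

static __maybe_unused struct drm_gem_object *
my_gem_create_object(struct drm_device *dev, size_t size)
{
        struct my_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return NULL;

        /* drm_gem_shmem_create() continues with the embedded base object. */
        return &bo->base.base;
}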
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        WARN_ON(shmem->vmap_use_count);

        if (obj->import_attach) {
                shmem->pages_use_count--;
                drm_prime_gem_destroy(obj, shmem->sgt);
                kvfree(shmem->pages);
        } else {
                if (shmem->sgt) {
                        dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
                                     shmem->sgt->nents, DMA_BIDIRECTIONAL);
                        sg_free_table(shmem->sgt);
                        kfree(shmem->sgt);
                }
                if (shmem->pages)
                        drm_gem_shmem_put_pages(shmem);
        }

        WARN_ON(shmem->pages_use_count);

        drm_gem_object_release(obj);
        mutex_destroy(&shmem->pages_lock);
        mutex_destroy(&shmem->vmap_lock);
        kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;
        struct page **pages;

        if (shmem->pages_use_count++ > 0)
                return 0;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages)) {
                DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
                shmem->pages_use_count = 0;
                return PTR_ERR(pages);
        }

        shmem->pages = pages;

        return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
        int ret;

        ret = mutex_lock_interruptible(&shmem->pages_lock);
        if (ret)
                return ret;
        ret = drm_gem_shmem_get_pages_locked(shmem);
        mutex_unlock(&shmem->pages_lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;

        if (WARN_ON_ONCE(!shmem->pages_use_count))
                return;

        if (--shmem->pages_use_count > 0)
                return;

        drm_gem_put_pages(obj, shmem->pages,
                          shmem->pages_mark_dirty_on_put,
                          shmem->pages_mark_accessed_on_put);
        shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
        mutex_lock(&shmem->pages_lock);
        drm_gem_shmem_put_pages_locked(shmem);
        mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
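/*
 * Example (illustrative sketch, not part of this library): driver code that
 * needs the backing pages to stay resident across an operation must keep the
 * get/put calls balanced. The my_*() name and the hardware step are
 * hypothetical.
 */
static __maybe_unused int my_use_backing_pages(struct drm_gem_shmem_object *shmem)
{
        int ret;

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                return ret;

        /*
         * shmem->pages is valid here and stays valid until the matching
         * drm_gem_shmem_put_pages() call drops the last reference, e.g.
         * while a (hypothetical) hardware job runs against the buffer.
         */

        drm_gem_shmem_put_pages(shmem);

        return 0;
}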
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;
        int ret;

        if (shmem->vmap_use_count++ > 0)
                return shmem->vaddr;

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                goto err_zero_use;

        if (obj->import_attach) {
                shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
        } else {
                pgprot_t prot = PAGE_KERNEL;

                if (!shmem->map_cached)
                        prot = pgprot_writecombine(prot);
                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
                                    VM_MAP, prot);
        }

        if (!shmem->vaddr) {
                DRM_DEBUG_KMS("Failed to vmap pages\n");
                ret = -ENOMEM;
                goto err_put_pages;
        }

        return shmem->vaddr;

err_put_pages:
        drm_gem_shmem_put_pages(shmem);
err_zero_use:
        shmem->vmap_use_count = 0;

        return ERR_PTR(ret);
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a virtual address exists for the buffer backing
 * the shmem GEM object.
 *
 * Returns:
 * The kernel virtual address on success or an ERR_PTR()-encoded negative error
 * code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        void *vaddr;
        int ret;

        ret = mutex_lock_interruptible(&shmem->vmap_lock);
        if (ret)
                return ERR_PTR(ret);
        vaddr = drm_gem_shmem_vmap_locked(shmem);
        mutex_unlock(&shmem->vmap_lock);

        return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
        struct drm_gem_object *obj = &shmem->base;

        if (WARN_ON_ONCE(!shmem->vmap_use_count))
                return;

        if (--shmem->vmap_use_count > 0)
                return;

        if (obj->import_attach)
                dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
        else
                vunmap(shmem->vaddr);

        shmem->vaddr = NULL;
        drm_gem_shmem_put_pages(shmem);
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        mutex_lock(&shmem->vmap_lock);
        drm_gem_shmem_vunmap_locked(shmem);
        mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
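/*
 * Example (illustrative sketch, not part of this library): CPU access to a
 * buffer through a kernel virtual mapping, with the vmap/vunmap calls kept
 * balanced. The my_*() name is hypothetical.
 */
static __maybe_unused int my_cpu_fill(struct drm_gem_object *obj, u8 value)
{
        void *vaddr;

        vaddr = drm_gem_shmem_vmap(obj);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        /* The mapping stays valid until the matching vunmap below. */
        memset(vaddr, value, obj->size);

        drm_gem_shmem_vunmap(obj, vaddr);

        return 0;
}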
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
                                 struct drm_device *dev, size_t size,
                                 uint32_t *handle)
{
        struct drm_gem_shmem_object *shmem;
        int ret;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return shmem;

        /*
         * Allocate an id in the idr table where the obj is registered;
         * the returned handle is the id userspace uses to refer to it.
         */
        ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put_unlocked(&shmem->base);
        if (ret)
                return ERR_PTR(ret);

        return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/* Update the madvise status; returns true if the object has not been purged,
 * false if it has.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        mutex_lock(&shmem->pages_lock);

        if (shmem->madv >= 0)
                shmem->madv = madv;

        madv = shmem->madv;

        mutex_unlock(&shmem->pages_lock);

        return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

        dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
                     shmem->sgt->nents, DMA_BIDIRECTIONAL);
        sg_free_table(shmem->sgt);
        kfree(shmem->sgt);
        shmem->sgt = NULL;

        drm_gem_shmem_put_pages_locked(shmem);

        shmem->madv = -1;

        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);

        /* Our goal here is to return as much of the memory as
         * possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
                                 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        if (!mutex_trylock(&shmem->pages_lock))
                return false;
        drm_gem_shmem_purge_locked(obj);
        mutex_unlock(&shmem->pages_lock);

        return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
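/*
 * Example (illustrative sketch, not part of this library): a driver madvise
 * ioctl could forward userspace hints to drm_gem_shmem_madvise(), and a
 * driver shrinker could later purge objects that are still marked DONTNEED.
 * The my_* names and the args layout are hypothetical.
 */
struct my_madvise_args {
        u32 handle;
        u32 madv;       /* e.g. 0 = WILLNEED, 1 = DONTNEED (driver-defined) */
        u32 retained;   /* out: backing store still present */
};

static __maybe_unused int my_madvise_ioctl(struct drm_device *dev, void *data,
                                           struct drm_file *file)
{
        struct my_madvise_args *args = data;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        args->retained = drm_gem_shmem_madvise(obj, args->madv);

        /*
         * A shrinker would then call drm_gem_shmem_purge() on objects for
         * which drm_gem_shmem_is_purgeable() is true.
         */

        drm_gem_object_put_unlocked(obj);

        return 0;
}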
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
                              struct drm_mode_create_dumb *args)
{
        u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_shmem_object *shmem;

        if (!args->pitch || !args->size) {
                args->pitch = min_pitch;
                args->size = args->pitch * args->height;
        } else {
                /* ensure sane minimum values */
                if (args->pitch < min_pitch)
                        args->pitch = min_pitch;
                if (args->size < args->pitch * args->height)
                        args->size = args->pitch * args->height;
        }

        shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

        return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        loff_t num_pages = obj->size >> PAGE_SHIFT;
        struct page *page;

        if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
                return VM_FAULT_SIGBUS;

        page = shmem->pages[vmf->pgoff];

        return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        int ret;

        ret = drm_gem_shmem_get_pages(shmem);
        WARN_ON_ONCE(ret != 0);

        drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        drm_gem_shmem_put_pages(shmem);
        drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
        .fault = drm_gem_shmem_fault,
        .open = drm_gem_shmem_vm_open,
        .close = drm_gem_shmem_vm_close,
};
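/*
 * Example (illustrative sketch, not part of this library): hardware with
 * extra pitch restrictions can fix up the dumb-buffer parameters before
 * handing off to drm_gem_shmem_dumb_create() above. The my_*() name and the
 * 64-byte alignment are hypothetical.
 */
static __maybe_unused int my_dumb_create(struct drm_file *file,
                                         struct drm_device *dev,
                                         struct drm_mode_create_dumb *args)
{
        /* Pretend the scanout engine requires 64-byte aligned pitches. */
        args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
        args->size = args->pitch * args->height;

        return drm_gem_shmem_dumb_create(file, dev, args);
}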
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: gem object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct drm_gem_shmem_object *shmem;
        int ret;

        /* Remove the fake offset */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

        shmem = to_drm_gem_shmem_obj(obj);

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret) {
                drm_gem_vm_close(vma);
                return ret;
        }

        vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        if (!shmem->map_cached)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        vma->vm_ops = &drm_gem_shmem_vm_ops;

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
                              const struct drm_gem_object *obj)
{
        const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
        drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
        drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
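/*
 * Example (illustrative sketch, not part of this library): a driver that
 * programs hardware with DMA addresses can use the cached, dma-mapped sg
 * table returned by drm_gem_shmem_get_pages_sgt() below. The my_*() name and
 * the dma_addrs output array are hypothetical.
 */
static __maybe_unused int my_collect_dma_addresses(struct drm_gem_object *obj,
                                                   dma_addr_t *dma_addrs)
{
        struct scatterlist *sg;
        struct sg_table *sgt;
        unsigned int i;

        sgt = drm_gem_shmem_get_pages_sgt(obj);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* Walk the table and record each segment's DMA address. */
        for_each_sg(sgt->sgl, sg, sgt->nents, i)
                dma_addrs[i] = sg_dma_address(sg);

        return 0;
}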
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
        int ret;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        struct sg_table *sgt;

        if (shmem->sgt)
                return shmem->sgt;

        WARN_ON(obj->import_attach);

        ret = drm_gem_shmem_get_pages(shmem);
        if (ret)
                return ERR_PTR(ret);

        sgt = drm_gem_shmem_get_sg_table(&shmem->base);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto err_put_pages;
        }
        /* Map the pages for use by the h/w. */
        dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

        shmem->sgt = sgt;

        return sgt;

err_put_pages:
        drm_gem_shmem_put_pages(shmem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
                                    struct dma_buf_attachment *attach,
                                    struct sg_table *sgt)
{
        size_t size = PAGE_ALIGN(attach->dmabuf->size);
        size_t npages = size >> PAGE_SHIFT;
        struct drm_gem_shmem_object *shmem;
        int ret;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return ERR_CAST(shmem);

        shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!shmem->pages) {
                ret = -ENOMEM;
                goto err_free_gem;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
        if (ret < 0)
                goto err_free_array;

        shmem->sgt = sgt;
        shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

        DRM_DEBUG_PRIME("size = %zu\n", size);

        return &shmem->base;

err_free_array:
        kvfree(shmem->pages);
err_free_gem:
        drm_gem_object_put_unlocked(&shmem->base);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
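/*
 * Example (illustrative sketch, not part of this library): minimal driver
 * wiring for these helpers, assuming the DRM_GEM_SHMEM_DRIVER_OPS convenience
 * macro from the shmem helper header, which hooks up dumb_create and the
 * PRIME import path. The my_* names are hypothetical and the struct is
 * abbreviated to the GEM-related fields.
 */
DEFINE_DRM_GEM_FOPS(my_driver_fops);

static struct drm_driver my_driver __maybe_unused = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops                   = &my_driver_fops,
        .name                   = "my_driver",
        .desc                   = "shmem helper example",
        .date                   = "20200101",
        .major                  = 1,
        .minor                  = 0,
        DRM_GEM_SHMEM_DRIVER_OPS,
};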