// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);

	/*
	 * Our buffers are kept pinned, so allocating them
	 * from the MOVABLE zone is a really bad idea, and
	 * conflicts with CMA. See comments above new_inode()
	 * why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
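/*
 * Illustrative sketch, not part of this file: how a driver might allocate
 * a shmem-backed buffer object. The "mydrv" name is hypothetical.
 */
static int __maybe_unused mydrv_gem_create_sketch(struct drm_device *dev,
						  size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * shmem->base is now a fully initialized struct drm_gem_object;
	 * drop the allocation reference once another owner (e.g. a handle)
	 * holds its own.
	 */
	drm_gem_object_put_unlocked(&shmem->base);

	return 0;
}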
/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		shmem->pages_use_count--;
		drm_prime_gem_destroy(obj, shmem->sgt);
		kvfree(shmem->pages);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);

			drm_gem_shmem_put_pages(shmem);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
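/*
 * Illustrative sketch (hypothetical driver code): every successful
 * drm_gem_shmem_get_pages() call must be balanced by a matching
 * drm_gem_shmem_put_pages() call once the driver is done with the pages.
 */
static int __maybe_unused mydrv_touch_pages_sketch(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	/* shmem->pages[] is valid here and stays pinned until the put. */

	drm_gem_shmem_put_pages(shmem);

	return 0;
}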
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach)
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	else
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a virtual address exists for the buffer backing
 * the shmem GEM object.
 *
 * Returns:
 * The kernel virtual address on success or an ERR_PTR()-encoded negative error
 * code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
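/*
 * Illustrative sketch (hypothetical driver code): drm_gem_shmem_vmap()
 * returns either a kernel virtual address or an ERR_PTR() value, so callers
 * must check with IS_ERR() rather than for NULL.
 */
static int __maybe_unused mydrv_clear_buffer_sketch(struct drm_gem_object *obj)
{
	void *vaddr;

	vaddr = drm_gem_shmem_vmap(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->size);	/* CPU access through the mapping */

	drm_gem_shmem_vunmap(obj, vaddr);

	return 0;
}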
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID from the IDR table where the object is registered;
	 * the handle that userspace sees is this ID.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes. Drivers for hardware that doesn't have any
 * additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
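/*
 * Wiring sketch (hypothetical): a driver that allocates its objects via
 * &drm_driver.gem_create_object can install its own &drm_gem_object_funcs
 * and still reuse the shmem callbacks and vm_ops exported by this file.
 */
static const struct drm_gem_object_funcs mydrv_gem_funcs_sketch __maybe_unused = {
	.free = drm_gem_shmem_free_object,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
	/* driver-specific overrides would go here */
};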
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @filp: File object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &file_operations.mmap handler.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	/* VM_PFNMAP was set by drm_gem_mmap() */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *				pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
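/*
 * Wiring sketch (hypothetical name): DEFINE_DRM_GEM_SHMEM_FOPS() from
 * <drm/drm_gem_shmem_helper.h> generates a &file_operations instance whose
 * .mmap handler is drm_gem_shmem_mmap(), which is how drivers are expected
 * to hook the helper up.
 */
DEFINE_DRM_GEM_SHMEM_FOPS(mydrv_fops_sketch);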
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *					 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	size_t npages = size >> PAGE_SHIFT;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!shmem->pages) {
		ret = -ENOMEM;
		goto err_free_gem;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
	if (ret < 0)
		goto err_free_array;

	shmem->sgt = sgt;
	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;

err_free_array:
	kvfree(shmem->pages);
err_free_gem:
	drm_gem_object_put_unlocked(&shmem->base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
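/*
 * End-to-end wiring sketch (hypothetical "mydrv" driver, assuming a fops
 * instance generated elsewhere with DEFINE_DRM_GEM_SHMEM_FOPS()): the
 * typical set of &drm_driver hooks a shmem-helper-based driver installs.
 */
static struct drm_driver mydrv_driver_sketch __maybe_unused = {
	.driver_features	= DRIVER_GEM,
	.dumb_create		= drm_gem_shmem_dumb_create,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	/* .fops = &mydrv_fops_sketch, from DEFINE_DRM_GEM_SHMEM_FOPS() */
};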