// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/dma-buf-map.h>
#include <linux/module.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;

/**
 * DOC: overview
 *
 * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
 * buffer object that is backed by video RAM (VRAM). It can be used for
 * framebuffer devices with dedicated memory.
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. GEM
 * VRAM buffer objects are either placed in the video memory or remain evicted
 * to system memory.
 *
 * With the GEM interface userspace applications create, manage and destroy
 * graphics buffers, such as an on-screen framebuffer. GEM does not provide
 * an implementation of these interfaces. It's up to the DRM driver to
 * provide an implementation that suits the hardware. If the hardware device
 * contains dedicated video memory, the DRM driver can use the VRAM helper
 * library. Each active buffer object is stored in video RAM. Active
 * buffers are used for drawing the current frame, typically something like
 * the frame's scanout buffer or the cursor image. If there's no more space
 * left in VRAM, inactive GEM objects can be moved to system memory.
 *
 * To initialize the VRAM helper library call drmm_vram_helper_init().
 * The function allocates and initializes an instance of &struct drm_vram_mm
 * in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
 * &struct drm_driver and &DRM_VRAM_MM_FILE_OPERATIONS to initialize
 * &struct file_operations; as illustrated below.
 *
 * .. code-block:: c
 *
 *	struct file_operations fops = {
 *		.owner = THIS_MODULE,
 *		DRM_VRAM_MM_FILE_OPERATIONS
 *	};
 *	struct drm_driver drv = {
 *		.driver_feature = DRM_ ... ,
 *		.fops = &fops,
 *		DRM_GEM_VRAM_DRIVER
 *	};
 *
 *	int init_drm_driver()
 *	{
 *		struct drm_device *dev;
 *		uint64_t vram_base;
 *		unsigned long vram_size;
 *		int ret;
 *
 *		// setup device, vram base and size
 *		// ...
 *
 *		ret = drmm_vram_helper_init(dev, vram_base, vram_size);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *
 * This creates an instance of &struct drm_vram_mm, exports DRM userspace
 * interfaces for GEM buffer management and initializes file operations to
 * allow for accessing created GEM buffers. With this setup, the DRM driver
 * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
 * to userspace.
 *
 * You don't have to clean up the instance of VRAM MM.
 * drmm_vram_helper_init() is a managed interface that installs a
 * clean-up handler to run during the DRM device's release.
 *
 * For drawing or scanout operations, rsp. buffer objects have to be pinned
 * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
 * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
 * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
 *
 * A buffer object that is pinned in video RAM has a fixed address within that
 * memory region. Call drm_gem_vram_offset() to retrieve this value.
 * Typically it's used to program the hardware's scanout engine for
 * framebuffers, set the cursor overlay's image for a mouse cursor, or use
 * it as input to the hardware's drawing engine.
 *
 * To access a buffer object's memory from the DRM driver, call
 * drm_gem_vram_vmap(). It maps the buffer into kernel address
 * space and returns the memory address. Use drm_gem_vram_vunmap() to
 * release the mapping.
 */

/*
 * Buffer-objects helpers
 */

static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */

	WARN_ON(gbo->vmap_use_count);
	WARN_ON(dma_buf_map_is_set(&gbo->map));

	drm_gem_object_release(&gbo->bo.base);
}

/* Releases the GEM object embedded in @gbo and frees the backing memory. */
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

/*
 * TTM destroy callback. Its address also serves as the type marker that
 * drm_is_gem_vram() uses to identify GEM VRAM BOs.
 */
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

/*
 * Fills in gbo->placement / gbo->placements from the DRM_GEM_VRAM_PL_FLAG_
 * bits in @pl_flag. Falls back to system memory if no region was selected.
 */
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	u32 invariant_flags = 0;
	unsigned int i;
	unsigned int c = 0;

	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
		invariant_flags = TTM_PL_FLAG_TOPDOWN;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
		gbo->placements[c].mem_type = TTM_PL_VRAM;
		gbo->placements[c++].flags = invariant_flags;
	}

	/* System memory: either requested explicitly, or as the fallback
	 * when no other placement was selected.
	 */
	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
		gbo->placements[c].mem_type = TTM_PL_SYSTEM;
		gbo->placements[c++].flags = invariant_flags;
	}

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	/* fpfn/lpfn of 0 means no range restriction within the region */
	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev: the DRM device
 * @size: the buffer size in bytes
 * @pg_align: the buffer's alignment in multiples of the page size
 *
 * GEM objects are allocated by calling struct drm_driver.gem_create_object,
 * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
 * object functions in struct drm_driver.gem_create_object. If no functions
 * are set, the new GEM object will use the default functions from GEM VRAM
 * helpers.
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						size_t size,
						unsigned long pg_align)
{
	struct drm_gem_vram_object *gbo;
	struct drm_gem_object *gem;
	struct drm_vram_mm *vmm = dev->vram_mm;
	struct ttm_device *bdev;
	int ret;

	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
		return ERR_PTR(-EINVAL);

	if (dev->driver->gem_create_object) {
		gem = dev->driver->gem_create_object(dev, size);
		if (!gem)
			return ERR_PTR(-ENOMEM);
		gbo = drm_gem_vram_of_gem(gem);
	} else {
		gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
		if (!gbo)
			return ERR_PTR(-ENOMEM);
		gem = &gbo->bo.base;
	}

	if (!gem->funcs)
		gem->funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, gem, size);
	if (ret) {
		kfree(gbo);
		return ERR_PTR(ret);
	}

	bdev = &vmm->bdev;

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);

	/*
	 * A failing ttm_bo_init will call ttm_buffer_object_destroy
	 * to release gbo->bo.base and kfree gbo.
	 */
	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, false, NULL, NULL,
			  ttm_buffer_object_destroy);
	if (ret)
		return ERR_PTR(ret);

	return gbo;
}
EXPORT_SYMBOL(drm_gem_vram_create);

/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo: the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo: the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

/* Returns the BO's offset within its memory region, in units of pages. */
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
	/* Keep TTM behavior for now, remove when drivers are audited */
	if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
		return 0;

	return gbo->bo.mem.start;
}

/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo: the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	/* Only a pinned BO has a stable address within the region. */
	if (WARN_ON_ONCE(!gbo->bo.pin_count))
		return (s64)-ENODEV;
	return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
EXPORT_SYMBOL(drm_gem_vram_offset);

/*
 * Pins the BO; the caller must hold the BO's reservation lock. If
 * @pl_flag is zero, the BO is pinned at its current location.
 */
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	/* An already-pinned BO only gets its pin count increased;
	 * it is not moved to a new placement.
	 */
	if (gbo->bo.pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

out:
	ttm_bo_pin(&gbo->bo);

	return 0;
}

/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo: the GEM VRAM object
 * @pl_flag: a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_pin_locked(gbo, pl_flag);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

/* Unpins the BO; the caller must hold the BO's reservation lock. */
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
	ttm_bo_unpin(&gbo->bo);
}

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo: the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;

	drm_gem_vram_unpin_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);

/*
 * Maps the BO into kernel address space and increases the vmap use
 * counter; the caller must hold the BO's reservation lock.
 */
static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				    struct dma_buf_map *map)
{
	int ret;

	if (gbo->vmap_use_count > 0)
		goto out;

	/*
	 * VRAM helpers unmap the BO only on demand. So the previous
	 * page mapping might still be around. Only vmap if there's
	 * no mapping present.
	 */
	if (dma_buf_map_is_null(&gbo->map)) {
		ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
		if (ret)
			return ret;
	}

out:
	++gbo->vmap_use_count;
	*map = gbo->map;

	return 0;
}

/*
 * Decreases the vmap use counter; the caller must hold the BO's
 * reservation lock. The actual unmap is deferred until eviction.
 */
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
				       struct dma_buf_map *map)
{
	struct drm_device *dev = gbo->bo.base.dev;

	if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
		return;

	if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map)))
		return; /* BUG: map not mapped from this BO */

	if (--gbo->vmap_use_count > 0)
		return;

	/*
	 * Permanently mapping and unmapping buffers adds overhead from
	 * updating the page tables and creates debugging output. Therefore,
	 * we delay the actual unmap operation until the BO gets evicted
	 * from memory. See drm_gem_vram_bo_driver_move_notify().
	 */
}

/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 *                       space
 * @gbo: The GEM VRAM object to map
 * @map: Returns the kernel virtual address of the VRAM GEM object's backing
 *       store.
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned objects cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;

	/* Pin at the current location so the mapping stays valid. */
	ret = drm_gem_vram_pin_locked(gbo, 0);
	if (ret)
		goto err_ttm_bo_unreserve;
	ret = drm_gem_vram_kmap_locked(gbo, map);
	if (ret)
		goto err_drm_gem_vram_unpin_locked;

	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_drm_gem_vram_unpin_locked:
	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_vmap);

/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo: The GEM VRAM object to unmap
 * @map: Kernel virtual address where the VRAM GEM object was mapped
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;

	drm_gem_vram_kunmap_locked(gbo, map);
	drm_gem_vram_unpin_locked(gbo);

	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);

/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file: the DRM file
 * @dev: the DRM device
 * @pg_align: the buffer's alignment in multiples of the page size
 * @pitch_align: the scanline's alignment in powers of 2
 * @args: the arguments as provided to \
	 &struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create.
 * Implementations of this interface should forward their arguments to this
 * helper, plus the driver-specific parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  unsigned long pg_align,
				  unsigned long pitch_align,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	if (pitch_align) {
		if (WARN_ON_ONCE(!is_power_of_2(pitch_align)))
			return -EINVAL;
		pitch = ALIGN(pitch, pitch_align);
	}
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, size, pg_align);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
	if (ret)
		goto err_drm_gem_object_put;

	/* Drop the initial reference; the handle now keeps the BO alive. */
	drm_gem_object_put(&gbo->bo.base);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(&gbo->bo.base);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);

/*
 * Helpers for struct ttm_device_funcs
 */

/* Returns true if @bo was created by the GEM VRAM helpers. */
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}

/* Sets up an eviction placement (system memory) for @gbo. */
static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
					       struct ttm_placement *pl)
{
	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}

/* Drops a deferred kernel mapping before the BO's memory moves away. */
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo)
{
	struct ttm_buffer_object *bo = &gbo->bo;
	struct drm_device *dev = bo->base.dev;

	/* A non-zero use count means someone still holds the mapping. */
	if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count))
		return;

	ttm_bo_vunmap(bo, &gbo->map);
	dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}

/* Moves the BO's backing storage to @new_mem by copying. */
static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
				       bool evict,
				       struct ttm_operation_ctx *ctx,
				       struct ttm_resource *new_mem)
{
	drm_gem_vram_bo_driver_move_notify(gbo);
	return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
}

/*
 * Helpers for struct drm_gem_object_funcs
 */

/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}

/*
 * Helpers for dumb buffers
 */

/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file: the DRM file
 * @dev: the DRM device
 * @args: the arguments as provided to \
	 &struct drm_driver.dumb_create
 *
 * This function requires the driver to use @drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
	Implements &struct drm_driver.dumb_mmap_offset
 * @file: DRM file pointer.
 * @dev: DRM device.
 * @handle: GEM handle
 * @offset: Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
652 */ 653 int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, 654 struct drm_device *dev, 655 uint32_t handle, uint64_t *offset) 656 { 657 struct drm_gem_object *gem; 658 struct drm_gem_vram_object *gbo; 659 660 gem = drm_gem_object_lookup(file, handle); 661 if (!gem) 662 return -ENOENT; 663 664 gbo = drm_gem_vram_of_gem(gem); 665 *offset = drm_gem_vram_mmap_offset(gbo); 666 667 drm_gem_object_put(gem); 668 669 return 0; 670 } 671 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); 672 673 /* 674 * Helpers for struct drm_plane_helper_funcs 675 */ 676 677 /** 678 * drm_gem_vram_plane_helper_prepare_fb() - \ 679 * Implements &struct drm_plane_helper_funcs.prepare_fb 680 * @plane: a DRM plane 681 * @new_state: the plane's new state 682 * 683 * During plane updates, this function sets the plane's fence and 684 * pins the GEM VRAM objects of the plane's new framebuffer to VRAM. 685 * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them. 686 * 687 * Returns: 688 * 0 on success, or 689 * a negative errno code otherwise. 
690 */ 691 int 692 drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, 693 struct drm_plane_state *new_state) 694 { 695 size_t i; 696 struct drm_gem_vram_object *gbo; 697 int ret; 698 699 if (!new_state->fb) 700 return 0; 701 702 for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) { 703 if (!new_state->fb->obj[i]) 704 continue; 705 gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); 706 ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); 707 if (ret) 708 goto err_drm_gem_vram_unpin; 709 } 710 711 ret = drm_gem_plane_helper_prepare_fb(plane, new_state); 712 if (ret) 713 goto err_drm_gem_vram_unpin; 714 715 return 0; 716 717 err_drm_gem_vram_unpin: 718 while (i) { 719 --i; 720 gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); 721 drm_gem_vram_unpin(gbo); 722 } 723 return ret; 724 } 725 EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb); 726 727 /** 728 * drm_gem_vram_plane_helper_cleanup_fb() - \ 729 * Implements &struct drm_plane_helper_funcs.cleanup_fb 730 * @plane: a DRM plane 731 * @old_state: the plane's old state 732 * 733 * During plane updates, this function unpins the GEM VRAM 734 * objects of the plane's old framebuffer from VRAM. Complements 735 * drm_gem_vram_plane_helper_prepare_fb(). 
736 */ 737 void 738 drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, 739 struct drm_plane_state *old_state) 740 { 741 size_t i; 742 struct drm_gem_vram_object *gbo; 743 744 if (!old_state->fb) 745 return; 746 747 for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) { 748 if (!old_state->fb->obj[i]) 749 continue; 750 gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]); 751 drm_gem_vram_unpin(gbo); 752 } 753 } 754 EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); 755 756 /* 757 * Helpers for struct drm_simple_display_pipe_funcs 758 */ 759 760 /** 761 * drm_gem_vram_simple_display_pipe_prepare_fb() - \ 762 * Implements &struct drm_simple_display_pipe_funcs.prepare_fb 763 * @pipe: a simple display pipe 764 * @new_state: the plane's new state 765 * 766 * During plane updates, this function pins the GEM VRAM 767 * objects of the plane's new framebuffer to VRAM. Call 768 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. 769 * 770 * Returns: 771 * 0 on success, or 772 * a negative errno code otherwise. 773 */ 774 int drm_gem_vram_simple_display_pipe_prepare_fb( 775 struct drm_simple_display_pipe *pipe, 776 struct drm_plane_state *new_state) 777 { 778 return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); 779 } 780 EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); 781 782 /** 783 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \ 784 * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb 785 * @pipe: a simple display pipe 786 * @old_state: the plane's old state 787 * 788 * During plane updates, this function unpins the GEM VRAM 789 * objects of the plane's old framebuffer from VRAM. Complements 790 * drm_gem_vram_simple_display_pipe_prepare_fb(). 
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *old_state)
{
	drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);

/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem: The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the use case of these PRIME
	 * helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem: The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() -
 *	Implements &struct drm_gem_object_funcs.vmap
 * @gem: The GEM object to map
 * @map: Returns the kernel virtual address of the VRAM GEM object's backing
 *       store.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	return drm_gem_vram_vmap(gbo, map);
}

/**
 * drm_gem_vram_object_vunmap() -
 *	Implements &struct drm_gem_object_funcs.vunmap
 * @gem: The GEM object to unmap
 * @map: Kernel virtual address where the VRAM GEM object was mapped
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_vunmap(gbo, map);
}

/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free = drm_gem_vram_object_free,
	.pin = drm_gem_vram_object_pin,
	.unpin = drm_gem_vram_object_unpin,
	.vmap = drm_gem_vram_object_vmap,
	.vunmap = drm_gem_vram_object_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

/*
 * VRAM memory manager
 */

/*
 * TTM TT
 */

/* Tears down and frees a TTM tt backend. */
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_destroy_common(bdev, tt);
	ttm_tt_fini(tt);
	kfree(tt);
}

/*
 * TTM BO device
 */

/* Allocates a cached TTM tt backend for @bo; returns NULL on error. */
static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs.
	 */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}

static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_bo_driver_move_notify(gbo);
}

static int bo_driver_move(struct ttm_buffer_object *bo,
			  bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct drm_gem_vram_object *gbo;

	gbo = drm_gem_vram_of_bo(bo);

	return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}

/* Computes the bus address of VRAM resources for CPU access. */
static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
				    struct ttm_resource *mem)
{
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do */
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct ttm_device_funcs bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.move = bo_driver_move,
	.delete_mem_notify = bo_driver_delete_mem_notify,
	.io_mem_reserve = bo_driver_io_mem_reserve,
};

/*
 * struct drm_vram_mm
 */

/* Prints the VRAM manager's state to a debugfs file. */
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
	struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}

static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
	{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};

/**
 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
 *
 * @minor: drm minor device.
 *
 */
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(drm_vram_mm_debugfs_list,
				 ARRAY_SIZE(drm_vram_mm_debugfs_list),
				 minor->debugfs_root, minor);
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);

/* Initializes the TTM device and VRAM range manager of @vmm. */
static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
			    uint64_t vram_base, size_t vram_size)
{
	int ret;

	vmm->vram_base = vram_base;
	vmm->vram_size = vram_size;

	ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
			      dev->anon_inode->i_mapping,
			      dev->vma_offset_manager,
			      false, true);
	if (ret)
		return ret;

	/*
	 * NOTE(review): if ttm_range_man_init() fails, the TTM device
	 * initialized above is not finalized here — confirm that callers
	 * (e.g. drm_vram_helper_alloc_mm()) do not leak it on this path.
	 */
	ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
				 false, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	return 0;
}

/* Counterpart of drm_vram_mm_init(). */
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
	ttm_device_fini(&vmm->bdev);
}

/*
 * Helpers for integration with struct drm_device
 */

/* deprecated; use drmm_vram_mm_init() */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
	struct drm_device *dev, uint64_t vram_base, size_t vram_size)
{
	int ret;

	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);

/* Managed-release action that tears down &drm_device.vram_mm. */
static void drm_vram_mm_release(struct drm_device *dev, void *ptr)
{
	drm_vram_helper_release_mm(dev);
}

/**
 * drmm_vram_helper_init - Initializes a device's instance of
 *                         &struct drm_vram_mm
 * @dev: the DRM device
 * @vram_base: the base address of the video memory
 * @vram_size: the size of the video memory in bytes
 *
 * Creates a new instance of &struct drm_vram_mm and stores it in
 * struct &drm_device.vram_mm. The instance is auto-managed and cleaned
 * up as part of device cleanup. Calling this function multiple times
 * will generate an error message.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
			  size_t vram_size)
{
	struct drm_vram_mm *vram_mm;

	/* Initializing twice is a driver bug; keep the existing instance. */
	if (drm_WARN_ON_ONCE(dev, dev->vram_mm))
		return 0;

	vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
	if (IS_ERR(vram_mm))
		return PTR_ERR(vram_mm);
	return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL);
}
EXPORT_SYMBOL(drmm_vram_helper_init);

/*
 * Mode-config helpers
 */

/*
 * Tests whether a mode's framebuffer (at @max_bpp bytes per pixel) fits
 * into at most half of the device's video memory.
 */
static enum drm_mode_status
drm_vram_helper_mode_valid_internal(struct drm_device *dev,
				    const struct drm_display_mode *mode,
				    unsigned long max_bpp)
{
	struct drm_vram_mm *vmm = dev->vram_mm;
	unsigned long fbsize, fbpages, max_fbpages;

	if (WARN_ON(!dev->vram_mm))
		return MODE_BAD;

	/* Importing a new framebuffer requires the previous one to stay
	 * resident, so a single framebuffer may use at most half of VRAM.
	 */
	max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;

	fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
	fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);

	if (fbpages > max_fbpages)
		return MODE_MEM;

	return MODE_OK;
}

/**
 * drm_vram_helper_mode_valid - Tests if a display mode's
 *	framebuffer fits into the available video memory.
 * @dev: the DRM device
 * @mode: the mode to test
 *
 * This function tests if enough video memory is available for using the
 * specified display mode. Atomic modesetting requires importing the
 * designated framebuffer into video memory before evicting the active
 * one. Hence, any framebuffer may consume at most half of the available
 * VRAM. Display modes that require a larger framebuffer can not be used,
 * even if the CRTC does support them. Each framebuffer is assumed to
 * have 32-bit color depth.
 *
 * Note:
 * The function can only test if the display mode is supported in
If there are too many framebuffers pinned to video memory, 1175 * a display mode may still not be usable in practice. The color depth of 1176 * 32-bit fits all current use case. A more flexible test can be added 1177 * when necessary. 1178 * 1179 * Returns: 1180 * MODE_OK if the display mode is supported, or an error code of type 1181 * enum drm_mode_status otherwise. 1182 */ 1183 enum drm_mode_status 1184 drm_vram_helper_mode_valid(struct drm_device *dev, 1185 const struct drm_display_mode *mode) 1186 { 1187 static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */ 1188 1189 return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp); 1190 } 1191 EXPORT_SYMBOL(drm_vram_helper_mode_valid); 1192 1193 MODULE_DESCRIPTION("DRM VRAM memory-management helpers"); 1194 MODULE_LICENSE("GPL"); 1195