// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/module.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;

/**
 * DOC: overview
 *
 * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
 * buffer object that is backed by video RAM (VRAM). It can be used for
 * framebuffer devices with dedicated memory.
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. GEM
 * VRAM buffer objects are either placed in the video memory or remain evicted
 * to system memory.
 *
 * With the GEM interface userspace applications create, manage and destroy
 * graphics buffers, such as an on-screen framebuffer. GEM does not provide
 * an implementation of these interfaces. It's up to the DRM driver to
 * provide an implementation that suits the hardware. If the hardware device
 * contains dedicated video memory, the DRM driver can use the VRAM helper
 * library. Each active buffer object is stored in video RAM. Active
 * buffers are used for drawing the current frame, typically something like
 * the frame's scanout buffer or the cursor image. If there's no more space
 * left in VRAM, inactive GEM objects can be moved to system memory.
 *
 * To initialize the VRAM helper library call drmm_vram_helper_alloc_mm().
 * The function allocates and initializes an instance of &struct drm_vram_mm
 * in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
 * &struct drm_driver and &DRM_VRAM_MM_FILE_OPERATIONS to initialize
 * &struct file_operations; as illustrated below.
 *
 * .. code-block:: c
 *
 *	struct file_operations fops = {
 *		.owner = THIS_MODULE,
 *		DRM_VRAM_MM_FILE_OPERATIONS
 *	};
 *	struct drm_driver drv = {
 *		.driver_feature = DRM_ ... ,
 *		.fops = &fops,
 *		DRM_GEM_VRAM_DRIVER
 *	};
 *
 *	int init_drm_driver()
 *	{
 *		struct drm_device *dev;
 *		uint64_t vram_base;
 *		unsigned long vram_size;
 *		int ret;
 *
 *		// setup device, vram base and size
 *		// ...
 *
 *		ret = drmm_vram_helper_alloc_mm(dev, vram_base, vram_size);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *
 * This creates an instance of &struct drm_vram_mm, exports DRM userspace
 * interfaces for GEM buffer management and initializes file operations to
 * allow for accessing created GEM buffers. With this setup, the DRM driver
 * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
 * to userspace.
 *
 * You don't have to clean up the instance of VRAM MM.
 * drmm_vram_helper_alloc_mm() is a managed interface that installs a
 * clean-up handler to run during the DRM device's release.
 *
 * For drawing or scanout operations, the respective buffer objects have to
 * be pinned in video RAM. Call drm_gem_vram_pin() with
 * &DRM_GEM_VRAM_PL_FLAG_VRAM or &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a
 * buffer object in video RAM or system memory. Call drm_gem_vram_unpin()
 * to release the pinned object afterwards.
 *
 * A buffer object that is pinned in video RAM has a fixed address within that
 * memory region. Call drm_gem_vram_offset() to retrieve this value.
 * Typically
 * it's used to program the hardware's scanout engine for framebuffers, set
 * the cursor overlay's image for a mouse cursor, or use it as input to the
 * hardware's drawing engine.
 *
 * To access a buffer object's memory from the DRM driver, call
 * drm_gem_vram_vmap(). It maps the buffer into kernel address
 * space and returns the memory address. Use drm_gem_vram_vunmap() to
 * release the mapping.
 */

/*
 * Buffer-objects helpers
 */

/* Releases the GEM object embedded in @gbo; the TTM BO is already gone. */
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */

	/* Both should be zero/NULL here; a leftover mapping indicates an
	 * unbalanced kmap/kunmap by a caller.
	 */
	WARN_ON(gbo->kmap_use_count);
	WARN_ON(gbo->kmap.virtual);

	drm_gem_object_release(&gbo->bo.base);
}

/* Final teardown: release the GEM object and free @gbo itself. */
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

/* TTM destroy callback; also used as identity marker by drm_is_gem_vram(). */
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

/*
 * Fills @gbo's TTM placement list from the DRM_GEM_VRAM_PL_FLAG_* bits in
 * @pl_flag. Falls back to system memory if no region bit is set.
 */
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	u32 invariant_flags = 0;
	unsigned int i;
	unsigned int c = 0;

	/* TOPDOWN applies to every placement entry we generate below. */
	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
		invariant_flags = TTM_PL_FLAG_TOPDOWN;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
		gbo->placements[c].mem_type = TTM_PL_VRAM;
		gbo->placements[c++].flags = invariant_flags;
	}

	/* !c: ensure at least one placement (system memory) exists. */
	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
		gbo->placements[c].mem_type = TTM_PL_SYSTEM;
		gbo->placements[c++].flags = invariant_flags;
	}

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	/* fpfn/lpfn of 0 means "anywhere in the region". */
	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev: the DRM device
 * @size: the buffer size in bytes
 * @pg_align: the buffer's alignment in multiples of the page size
 *
 * GEM objects are allocated by calling struct drm_driver.gem_create_object,
 * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
 * object functions in struct drm_driver.gem_create_object. If no functions
 * are set, the new GEM object will use the default functions from GEM VRAM
 * helpers.
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						size_t size,
						unsigned long pg_align)
{
	struct drm_gem_vram_object *gbo;
	struct drm_gem_object *gem;
	struct drm_vram_mm *vmm = dev->vram_mm;
	struct ttm_bo_device *bdev;
	int ret;
	size_t acc_size;

	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
		return ERR_PTR(-EINVAL);

	if (dev->driver->gem_create_object) {
		gem = dev->driver->gem_create_object(dev, size);
		if (!gem)
			return ERR_PTR(-ENOMEM);
		gbo = drm_gem_vram_of_gem(gem);
	} else {
		gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
		if (!gbo)
			return ERR_PTR(-ENOMEM);
		gem = &gbo->bo.base;
	}

	/* Keep driver-provided GEM funcs; install our defaults otherwise. */
	if (!gem->funcs)
		gem->funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, gem, size);
	if (ret) {
		kfree(gbo);
		return ERR_PTR(ret);
	}

	bdev = &vmm->bdev;
	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	/* New BOs start out in system memory until they are pinned. */
	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);

	/*
	 * A failing ttm_bo_init will call ttm_buffer_object_destroy
	 * to release gbo->bo.base and kfree gbo.
	 */
	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, false, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		return ERR_PTR(ret);

	return gbo;
}
EXPORT_SYMBOL(drm_gem_vram_create);

/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo: the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo: the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

/* Returns the BO's start offset within its memory region, in pages. */
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
	/* Keep TTM behavior for now, remove when drivers are audited */
	if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
		return 0;

	return gbo->bo.mem.start;
}

/**
 * drm_gem_vram_offset() - Returns a GEM VRAM object's offset in video memory
 * @gbo: the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	/* An unpinned BO may be moved at any time, so its offset is
	 * meaningless; require a pinned BO.
	 */
	if (WARN_ON_ONCE(!gbo->bo.pin_count))
		return (s64)-ENODEV;
	return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
EXPORT_SYMBOL(drm_gem_vram_offset);

/* Pins @gbo; caller must hold the BO's reservation lock. */
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	/* Already pinned: just take another pin reference. */
	if (gbo->bo.pin_count)
		goto out;

	/* pl_flag == 0 keeps the BO at its current location. */
	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		return ret;

out:
	ttm_bo_pin(&gbo->bo);

	return 0;
}

/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo: the GEM VRAM object
 * @pl_flag: a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Small buffer objects, such as cursor images, can lead to memory
 * fragmentation if they are pinned in the middle of video RAM. This
 * is especially a problem on devices with only a small amount of
 * video RAM. Fragmentation can prevent the primary framebuffer from
 * fitting in, even though there's enough memory overall. The modifier
 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
 * at the high end of the memory region to avoid fragmentation.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;
	ret = drm_gem_vram_pin_locked(gbo, pl_flag);
	ttm_bo_unreserve(&gbo->bo);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

/* Drops one pin reference; caller must hold the BO's reservation lock. */
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
	ttm_bo_unpin(&gbo->bo);
}

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo: the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ret;

	drm_gem_vram_unpin_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);

/*
 * Maps @gbo into kernel address space and takes a kmap reference.
 * Caller must hold the BO's reservation lock. Returns NULL when the BO
 * is not mapped (and @map is false), or an ERR_PTR() on mapping failure.
 */
static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				      bool map, bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	/* Mapping already exists; just take another reference below. */
	if (gbo->kmap_use_count > 0)
		goto out;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!kmap->virtual) {
		if (is_iomem)
			*is_iomem = false;
		return NULL; /* not mapped; don't increment ref */
	}
	++gbo->kmap_use_count;
	if (is_iomem)
		return ttm_kmap_obj_virtual(kmap, is_iomem);
	return kmap->virtual;
}

/* Drops one kmap reference; caller must hold the BO's reservation lock. */
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->kmap_use_count))
		return;
	if (--gbo->kmap_use_count > 0)
		return;

	/*
	 * Permanently mapping and unmapping buffers adds overhead from
	 * updating the page tables and creates debugging output. Therefore,
	 * we delay the actual unmap operation until the BO gets evicted
	 * from memory. See drm_gem_vram_bo_driver_move_notify().
	 */
}

/**
 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
 * space
 * @gbo: The GEM VRAM object to map
 *
 * The vmap function pins a GEM VRAM object to its current location, either
 * system or video memory, and maps its buffer into kernel address space.
 * As pinned objects cannot be relocated, you should avoid pinning objects
 * permanently. Call drm_gem_vram_vunmap() with the returned address to
 * unmap and unpin the GEM VRAM object.
 *
 * Returns:
 * The buffer's virtual address on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
{
	int ret;
	void *base;

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret)
		return ERR_PTR(ret);

	/* Pin at the current location (pl_flag == 0). */
	ret = drm_gem_vram_pin_locked(gbo, 0);
	if (ret)
		goto err_ttm_bo_unreserve;
	base = drm_gem_vram_kmap_locked(gbo, true, NULL);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err_drm_gem_vram_unpin_locked;
	}

	ttm_bo_unreserve(&gbo->bo);

	return base;

err_drm_gem_vram_unpin_locked:
	drm_gem_vram_unpin_locked(gbo);
err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_vmap);

/**
 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
 * @gbo: The GEM VRAM object to unmap
 * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
 *
 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
 * the documentation for drm_gem_vram_vmap() for more information.
 */
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
{
	int ret;

	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;

	drm_gem_vram_kunmap_locked(gbo);
	drm_gem_vram_unpin_locked(gbo);

	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);

/**
 * drm_gem_vram_fill_create_dumb() - Helper for implementing
 *	&struct drm_driver.dumb_create
 * @file: the DRM file
 * @dev: the DRM device
 * @pg_align: the buffer's alignment in multiples of the page size
 * @pitch_align: the scanline's alignment in powers of 2
 * @args: the arguments as provided to &struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
511 */ 512 int drm_gem_vram_fill_create_dumb(struct drm_file *file, 513 struct drm_device *dev, 514 unsigned long pg_align, 515 unsigned long pitch_align, 516 struct drm_mode_create_dumb *args) 517 { 518 size_t pitch, size; 519 struct drm_gem_vram_object *gbo; 520 int ret; 521 u32 handle; 522 523 pitch = args->width * DIV_ROUND_UP(args->bpp, 8); 524 if (pitch_align) { 525 if (WARN_ON_ONCE(!is_power_of_2(pitch_align))) 526 return -EINVAL; 527 pitch = ALIGN(pitch, pitch_align); 528 } 529 size = pitch * args->height; 530 531 size = roundup(size, PAGE_SIZE); 532 if (!size) 533 return -EINVAL; 534 535 gbo = drm_gem_vram_create(dev, size, pg_align); 536 if (IS_ERR(gbo)) 537 return PTR_ERR(gbo); 538 539 ret = drm_gem_handle_create(file, &gbo->bo.base, &handle); 540 if (ret) 541 goto err_drm_gem_object_put; 542 543 drm_gem_object_put(&gbo->bo.base); 544 545 args->pitch = pitch; 546 args->size = size; 547 args->handle = handle; 548 549 return 0; 550 551 err_drm_gem_object_put: 552 drm_gem_object_put(&gbo->bo.base); 553 return ret; 554 } 555 EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb); 556 557 /* 558 * Helpers for struct ttm_bo_driver 559 */ 560 561 static bool drm_is_gem_vram(struct ttm_buffer_object *bo) 562 { 563 return (bo->destroy == ttm_buffer_object_destroy); 564 } 565 566 static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, 567 struct ttm_placement *pl) 568 { 569 drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); 570 *pl = gbo->placement; 571 } 572 573 static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, 574 bool evict, 575 struct ttm_resource *new_mem) 576 { 577 struct ttm_bo_kmap_obj *kmap = &gbo->kmap; 578 579 if (WARN_ON_ONCE(gbo->kmap_use_count)) 580 return; 581 582 if (!kmap->virtual) 583 return; 584 ttm_bo_kunmap(kmap); 585 kmap->virtual = NULL; 586 } 587 588 static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, 589 bool evict, 590 struct ttm_operation_ctx *ctx, 591 struct 
ttm_resource *new_mem) 592 { 593 return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem); 594 } 595 596 /* 597 * Helpers for struct drm_gem_object_funcs 598 */ 599 600 /** 601 * drm_gem_vram_object_free() - \ 602 Implements &struct drm_gem_object_funcs.free 603 * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem 604 */ 605 static void drm_gem_vram_object_free(struct drm_gem_object *gem) 606 { 607 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); 608 609 drm_gem_vram_put(gbo); 610 } 611 612 /* 613 * Helpers for dump buffers 614 */ 615 616 /** 617 * drm_gem_vram_driver_create_dumb() - \ 618 Implements &struct drm_driver.dumb_create 619 * @file: the DRM file 620 * @dev: the DRM device 621 * @args: the arguments as provided to \ 622 &struct drm_driver.dumb_create 623 * 624 * This function requires the driver to use @drm_device.vram_mm for its 625 * instance of VRAM MM. 626 * 627 * Returns: 628 * 0 on success, or 629 * a negative error code otherwise. 630 */ 631 int drm_gem_vram_driver_dumb_create(struct drm_file *file, 632 struct drm_device *dev, 633 struct drm_mode_create_dumb *args) 634 { 635 if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized")) 636 return -EINVAL; 637 638 return drm_gem_vram_fill_create_dumb(file, dev, 0, 0, args); 639 } 640 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create); 641 642 /** 643 * drm_gem_vram_driver_dumb_mmap_offset() - \ 644 Implements &struct drm_driver.dumb_mmap_offset 645 * @file: DRM file pointer. 646 * @dev: DRM device. 647 * @handle: GEM handle 648 * @offset: Returns the mapping's memory offset on success 649 * 650 * Returns: 651 * 0 on success, or 652 * a negative errno code otherwise. 
653 */ 654 int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, 655 struct drm_device *dev, 656 uint32_t handle, uint64_t *offset) 657 { 658 struct drm_gem_object *gem; 659 struct drm_gem_vram_object *gbo; 660 661 gem = drm_gem_object_lookup(file, handle); 662 if (!gem) 663 return -ENOENT; 664 665 gbo = drm_gem_vram_of_gem(gem); 666 *offset = drm_gem_vram_mmap_offset(gbo); 667 668 drm_gem_object_put(gem); 669 670 return 0; 671 } 672 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); 673 674 /* 675 * Helpers for struct drm_plane_helper_funcs 676 */ 677 678 /** 679 * drm_gem_vram_plane_helper_prepare_fb() - \ 680 * Implements &struct drm_plane_helper_funcs.prepare_fb 681 * @plane: a DRM plane 682 * @new_state: the plane's new state 683 * 684 * During plane updates, this function sets the plane's fence and 685 * pins the GEM VRAM objects of the plane's new framebuffer to VRAM. 686 * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them. 687 * 688 * Returns: 689 * 0 on success, or 690 * a negative errno code otherwise. 
 */
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	size_t i;
	struct drm_gem_vram_object *gbo;
	int ret;

	if (!new_state->fb)
		return 0;

	for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
		if (!new_state->fb->obj[i])
			continue;
		gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
		if (ret)
			goto err_drm_gem_vram_unpin;
	}

	ret = drm_gem_fb_prepare_fb(plane, new_state);
	if (ret)
		goto err_drm_gem_vram_unpin;

	return 0;

err_drm_gem_vram_unpin:
	/* Unwind: unpin only the objects pinned before the failure. */
	while (i) {
		--i;
		gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
		drm_gem_vram_unpin(gbo);
	}
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);

/**
 * drm_gem_vram_plane_helper_cleanup_fb() - Implements
 *	&struct drm_plane_helper_funcs.cleanup_fb
 * @plane: a DRM plane
 * @old_state: the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_plane_helper_prepare_fb().
 */
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	size_t i;
	struct drm_gem_vram_object *gbo;

	if (!old_state->fb)
		return;

	for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
		if (!old_state->fb->obj[i])
			continue;
		gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
		drm_gem_vram_unpin(gbo);
	}
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);

/*
 * Helpers for struct drm_simple_display_pipe_funcs
 */

/**
 * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements
 *	&struct drm_simple_display_pipe_funcs.prepare_fb
 * @pipe: a simple display pipe
 * @new_state: the plane's new state
 *
 * During plane updates, this function pins the GEM VRAM
 * objects of the plane's new framebuffer to VRAM. Call
 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_simple_display_pipe_prepare_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *new_state)
{
	return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);

/**
 * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements
 *	&struct drm_simple_display_pipe_funcs.cleanup_fb
 * @pipe: a simple display pipe
 * @old_state: the plane's old state
 *
 * During plane updates, this function unpins the GEM VRAM
 * objects of the plane's old framebuffer from VRAM. Complements
 * drm_gem_vram_simple_display_pipe_prepare_fb().
 */
void drm_gem_vram_simple_display_pipe_cleanup_fb(
	struct drm_simple_display_pipe *pipe,
	struct drm_plane_state *old_state)
{
	drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);

/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
 * @gem: The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the use case of these PRIME
	 * helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
 * @gem: The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() - Implements &struct drm_gem_object_funcs.vmap
 * @gem: The GEM object to map
 *
 * Returns:
 * The buffer's virtual address on success, or
 * NULL otherwise.
 */
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	void *base;

	base = drm_gem_vram_vmap(gbo);
	/* The vmap interface reports errors as NULL, not ERR_PTR(). */
	if (IS_ERR(base))
		return NULL;
	return base;
}

/**
 * drm_gem_vram_object_vunmap() - Implements
 *	&struct drm_gem_object_funcs.vunmap
 * @gem: The GEM object to unmap
 * @vaddr: The mapping's base address
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_vunmap(gbo, vaddr);
}

/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free	= drm_gem_vram_object_free,
	.pin	= drm_gem_vram_object_pin,
	.unpin	= drm_gem_vram_object_unpin,
	.vmap	= drm_gem_vram_object_vmap,
	.vunmap	= drm_gem_vram_object_vunmap,
	.mmap   = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

/*
 * VRAM memory manager
 */

/*
 * TTM TT
 */

/* Tears down and frees a TT backend allocated by bo_driver_ttm_tt_create(). */
static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_destroy_common(bdev, tt);
	ttm_tt_fini(tt);
	kfree(tt);
}

/*
 * TTM BO device
 */

/* Allocates a plain, cached TT backend for system-memory placement. */
static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs.
*/ 935 if (!drm_is_gem_vram(bo)) 936 return; 937 938 gbo = drm_gem_vram_of_bo(bo); 939 940 drm_gem_vram_bo_driver_evict_flags(gbo, placement); 941 } 942 943 static void bo_driver_move_notify(struct ttm_buffer_object *bo, 944 bool evict, 945 struct ttm_resource *new_mem) 946 { 947 struct drm_gem_vram_object *gbo; 948 949 /* TTM may pass BOs that are not GEM VRAM BOs. */ 950 if (!drm_is_gem_vram(bo)) 951 return; 952 953 gbo = drm_gem_vram_of_bo(bo); 954 955 drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); 956 } 957 958 static int bo_driver_move(struct ttm_buffer_object *bo, 959 bool evict, 960 struct ttm_operation_ctx *ctx, 961 struct ttm_resource *new_mem) 962 { 963 struct drm_gem_vram_object *gbo; 964 965 gbo = drm_gem_vram_of_bo(bo); 966 967 return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem); 968 } 969 970 static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, 971 struct ttm_resource *mem) 972 { 973 struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); 974 975 switch (mem->mem_type) { 976 case TTM_PL_SYSTEM: /* nothing to do */ 977 break; 978 case TTM_PL_VRAM: 979 mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base; 980 mem->bus.is_iomem = true; 981 mem->bus.caching = ttm_write_combined; 982 break; 983 default: 984 return -EINVAL; 985 } 986 987 return 0; 988 } 989 990 static struct ttm_bo_driver bo_driver = { 991 .ttm_tt_create = bo_driver_ttm_tt_create, 992 .ttm_tt_destroy = bo_driver_ttm_tt_destroy, 993 .eviction_valuable = ttm_bo_eviction_valuable, 994 .evict_flags = bo_driver_evict_flags, 995 .move = bo_driver_move, 996 .move_notify = bo_driver_move_notify, 997 .io_mem_reserve = bo_driver_io_mem_reserve, 998 }; 999 1000 /* 1001 * struct drm_vram_mm 1002 */ 1003 1004 static int drm_vram_mm_debugfs(struct seq_file *m, void *data) 1005 { 1006 struct drm_info_node *node = (struct drm_info_node *) m->private; 1007 struct drm_vram_mm *vmm = node->minor->dev->vram_mm; 1008 struct ttm_resource_manager *man = 
ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); 1009 struct drm_printer p = drm_seq_file_printer(m); 1010 1011 ttm_resource_manager_debug(man, &p); 1012 return 0; 1013 } 1014 1015 static const struct drm_info_list drm_vram_mm_debugfs_list[] = { 1016 { "vram-mm", drm_vram_mm_debugfs, 0, NULL }, 1017 }; 1018 1019 /** 1020 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file. 1021 * 1022 * @minor: drm minor device. 1023 * 1024 */ 1025 void drm_vram_mm_debugfs_init(struct drm_minor *minor) 1026 { 1027 drm_debugfs_create_files(drm_vram_mm_debugfs_list, 1028 ARRAY_SIZE(drm_vram_mm_debugfs_list), 1029 minor->debugfs_root, minor); 1030 } 1031 EXPORT_SYMBOL(drm_vram_mm_debugfs_init); 1032 1033 static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, 1034 uint64_t vram_base, size_t vram_size) 1035 { 1036 int ret; 1037 1038 vmm->vram_base = vram_base; 1039 vmm->vram_size = vram_size; 1040 1041 ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, 1042 dev->anon_inode->i_mapping, 1043 dev->vma_offset_manager, 1044 true); 1045 if (ret) 1046 return ret; 1047 1048 ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM, 1049 false, vram_size >> PAGE_SHIFT); 1050 if (ret) 1051 return ret; 1052 1053 return 0; 1054 } 1055 1056 static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) 1057 { 1058 ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM); 1059 ttm_bo_device_release(&vmm->bdev); 1060 } 1061 1062 /* 1063 * Helpers for integration with struct drm_device 1064 */ 1065 1066 /* deprecated; use drmm_vram_mm_init() */ 1067 struct drm_vram_mm *drm_vram_helper_alloc_mm( 1068 struct drm_device *dev, uint64_t vram_base, size_t vram_size) 1069 { 1070 int ret; 1071 1072 if (WARN_ON(dev->vram_mm)) 1073 return dev->vram_mm; 1074 1075 dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL); 1076 if (!dev->vram_mm) 1077 return ERR_PTR(-ENOMEM); 1078 1079 ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size); 1080 if (ret) 1081 goto err_kfree; 1082 1083 return dev->vram_mm; 1084 
1085 err_kfree: 1086 kfree(dev->vram_mm); 1087 dev->vram_mm = NULL; 1088 return ERR_PTR(ret); 1089 } 1090 EXPORT_SYMBOL(drm_vram_helper_alloc_mm); 1091 1092 void drm_vram_helper_release_mm(struct drm_device *dev) 1093 { 1094 if (!dev->vram_mm) 1095 return; 1096 1097 drm_vram_mm_cleanup(dev->vram_mm); 1098 kfree(dev->vram_mm); 1099 dev->vram_mm = NULL; 1100 } 1101 EXPORT_SYMBOL(drm_vram_helper_release_mm); 1102 1103 static void drm_vram_mm_release(struct drm_device *dev, void *ptr) 1104 { 1105 drm_vram_helper_release_mm(dev); 1106 } 1107 1108 /** 1109 * drmm_vram_helper_init - Initializes a device's instance of 1110 * &struct drm_vram_mm 1111 * @dev: the DRM device 1112 * @vram_base: the base address of the video memory 1113 * @vram_size: the size of the video memory in bytes 1114 * 1115 * Creates a new instance of &struct drm_vram_mm and stores it in 1116 * struct &drm_device.vram_mm. The instance is auto-managed and cleaned 1117 * up as part of device cleanup. Calling this function multiple times 1118 * will generate an error message. 1119 * 1120 * Returns: 1121 * 0 on success, or a negative errno code otherwise. 
1122 */ 1123 int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base, 1124 size_t vram_size) 1125 { 1126 struct drm_vram_mm *vram_mm; 1127 1128 if (drm_WARN_ON_ONCE(dev, dev->vram_mm)) 1129 return 0; 1130 1131 vram_mm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size); 1132 if (IS_ERR(vram_mm)) 1133 return PTR_ERR(vram_mm); 1134 return drmm_add_action_or_reset(dev, drm_vram_mm_release, NULL); 1135 } 1136 EXPORT_SYMBOL(drmm_vram_helper_init); 1137 1138 /* 1139 * Mode-config helpers 1140 */ 1141 1142 static enum drm_mode_status 1143 drm_vram_helper_mode_valid_internal(struct drm_device *dev, 1144 const struct drm_display_mode *mode, 1145 unsigned long max_bpp) 1146 { 1147 struct drm_vram_mm *vmm = dev->vram_mm; 1148 unsigned long fbsize, fbpages, max_fbpages; 1149 1150 if (WARN_ON(!dev->vram_mm)) 1151 return MODE_BAD; 1152 1153 max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT; 1154 1155 fbsize = mode->hdisplay * mode->vdisplay * max_bpp; 1156 fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); 1157 1158 if (fbpages > max_fbpages) 1159 return MODE_MEM; 1160 1161 return MODE_OK; 1162 } 1163 1164 /** 1165 * drm_vram_helper_mode_valid - Tests if a display mode's 1166 * framebuffer fits into the available video memory. 1167 * @dev: the DRM device 1168 * @mode: the mode to test 1169 * 1170 * This function tests if enough video memory is available for using the 1171 * specified display mode. Atomic modesetting requires importing the 1172 * designated framebuffer into video memory before evicting the active 1173 * one. Hence, any framebuffer may consume at most half of the available 1174 * VRAM. Display modes that require a larger framebuffer can not be used, 1175 * even if the CRTC does support them. Each framebuffer is assumed to 1176 * have 32-bit color depth. 1177 * 1178 * Note: 1179 * The function can only test if the display mode is supported in 1180 * general. 
If there are too many framebuffers pinned to video memory, 1181 * a display mode may still not be usable in practice. The color depth of 1182 * 32-bit fits all current use case. A more flexible test can be added 1183 * when necessary. 1184 * 1185 * Returns: 1186 * MODE_OK if the display mode is supported, or an error code of type 1187 * enum drm_mode_status otherwise. 1188 */ 1189 enum drm_mode_status 1190 drm_vram_helper_mode_valid(struct drm_device *dev, 1191 const struct drm_display_mode *mode) 1192 { 1193 static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */ 1194 1195 return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp); 1196 } 1197 EXPORT_SYMBOL(drm_vram_helper_mode_valid); 1198 1199 MODULE_DESCRIPTION("DRM VRAM memory-management helpers"); 1200 MODULE_LICENSE("GPL"); 1201