// SPDX-License-Identifier: GPL-2.0-or-later

#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>
#include <drm/drm_vram_mm_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;

/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 */

/*
 * Buffer-objects helpers
 */

static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
	/* We got here via ttm_bo_put(), which means that the
	 * TTM buffer object in 'bo' has already been cleaned
	 * up; only release the GEM object.
	 */
	drm_gem_object_release(&gbo->bo.base);
}

static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	unsigned int i;
	unsigned int c = 0;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & TTM_PL_FLAG_VRAM)
		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (pl_flag & TTM_PL_FLAG_SYSTEM)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	if (!c)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

static int drm_gem_vram_init(struct drm_device *dev,
			     struct ttm_bo_device *bdev,
			     struct drm_gem_vram_object *gbo,
			     size_t size, unsigned long pg_align,
			     bool interruptible)
{
	int ret;
	size_t acc_size;

	if (!gbo->bo.base.funcs)
		gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
	if (ret)
		return ret;

	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, interruptible, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		goto err_drm_gem_object_release;

	return 0;

err_drm_gem_object_release:
	drm_gem_object_release(&gbo->bo.base);
	return ret;
}

/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev: the DRM device
 * @bdev: the TTM BO device backing the object
 * @size: the buffer size in bytes
 * @pg_align: the buffer's alignment in multiples of the page size
 * @interruptible: sleep interruptible if waiting for memory
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						struct ttm_bo_device *bdev,
						size_t size,
						unsigned long pg_align,
						bool interruptible)
{
	struct drm_gem_vram_object *gbo;
	int ret;

	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
	if (!gbo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
	if (ret < 0)
		goto err_kfree;

	return gbo;

err_kfree:
	kfree(gbo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);
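/*
 * Example: allocating and releasing a VRAM-backed GEM object
 *
 * A minimal usage sketch, not part of the helper library itself. The
 * variables dev and size, and the page alignment of 0, are hypothetical;
 * a driver would pass its own DRM device, TTM BO device and buffer
 * parameters:
 *
 *	struct drm_gem_vram_object *gbo;
 *
 *	gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
 *	if (IS_ERR(gbo))
 *		return PTR_ERR(gbo);
 *
 *	... use the buffer object ...
 *
 *	drm_gem_vram_put(gbo);
 *
 * drm_gem_vram_put() drops the reference taken by drm_gem_vram_create();
 * the object is destroyed when the last reference is released.
 */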
/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo: the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo: the GEM VRAM object
 *
 * See drm_vma_node_offset_addr() for more information.
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset is allocated.
 */
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

/**
 * drm_gem_vram_offset() - \
	Returns a GEM VRAM object's offset in video memory
 * @gbo: the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->pin_count))
		return (s64)-ENODEV;
	return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);

/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
 * @gbo: the GEM VRAM object
 * @pl_flag: a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from
 * a memory region. A pinned buffer object has to be unpinned before
 * it can be pinned to another region. If the pl_flag argument is 0,
 * the buffer is pinned at its current location (video RAM or system
 * memory).
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret < 0)
		return ret;

	if (gbo->pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		goto err_ttm_bo_unreserve;

out:
	++gbo->pin_count;
	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo: the GEM VRAM object
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(!gbo->pin_count))
		goto out;

	--gbo->pin_count;
	if (gbo->pin_count)
		goto out;

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		goto err_ttm_bo_unreserve;

out:
	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
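/*
 * Example: pinning a buffer object for scanout
 *
 * A minimal sketch of how a driver might pin a buffer object to video
 * RAM and retrieve its offset, for instance before programming a display
 * plane; gbo and the surrounding hardware update are hypothetical:
 *
 *	s64 offset;
 *	int ret;
 *
 *	ret = drm_gem_vram_pin(gbo, TTM_PL_FLAG_VRAM);
 *	if (ret)
 *		return ret;
 *	offset = drm_gem_vram_offset(gbo);
 *	if (offset < 0) {
 *		drm_gem_vram_unpin(gbo);
 *		return (int)offset;
 *	}
 *
 *	... program the hardware with 'offset' ...
 *
 *	drm_gem_vram_unpin(gbo);
 */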
/**
 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
 * @gbo: the GEM VRAM object
 * @map: establish a mapping if necessary
 * @is_iomem: returns true if the mapped memory is I/O memory, or false \
	otherwise; can be NULL
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the current mapping. If the parameter map is false, the
 * function only queries the current mapping, but does not establish a
 * new one.
 *
 * Returns:
 * The buffer's virtual address if mapped, or
 * NULL if not mapped, or
 * an ERR_PTR()-encoded error code otherwise.
 */
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
			bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!is_iomem)
		return kmap->virtual;
	if (!kmap->virtual) {
		*is_iomem = false;
		return NULL;
	}
	return ttm_kmap_obj_virtual(kmap, is_iomem);
}
EXPORT_SYMBOL(drm_gem_vram_kmap);

/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo: the GEM VRAM object
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (!kmap->virtual)
		return;

	ttm_bo_kunmap(kmap);
	kmap->virtual = NULL;
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
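/*
 * Example: updating a buffer object from the CPU
 *
 * A minimal sketch of filling a mapped buffer object through a kernel
 * mapping; gbo, src and len are hypothetical, and the is_iomem result is
 * used to pick the correct copy routine:
 *
 *	bool is_iomem;
 *	void *vaddr;
 *
 *	vaddr = drm_gem_vram_kmap(gbo, true, &is_iomem);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	if (is_iomem)
 *		memcpy_toio((void __iomem *)vaddr, src, len);
 *	else
 *		memcpy(vaddr, src, len);
 *	drm_gem_vram_kunmap(gbo);
 */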
/**
 * drm_gem_vram_fill_create_dumb() - \
	Helper for implementing &struct drm_driver.dumb_create
 * @file: the DRM file
 * @dev: the DRM device
 * @bdev: the TTM BO device managing the buffer object
 * @pg_align: the buffer's alignment in multiples of the page size
 * @interruptible: sleep interruptible if waiting for memory
 * @args: the arguments as provided to \
	&struct drm_driver.dumb_create
 *
 * This helper function fills &struct drm_mode_create_dumb, which is used
 * by &struct drm_driver.dumb_create. Implementations of this interface
 * should forward their arguments to this helper, plus the driver-specific
 * parameters.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  struct ttm_bo_device *bdev,
				  unsigned long pg_align,
				  bool interruptible,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	pitch = args->width * ((args->bpp + 7) / 8);
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
	if (ret)
		goto err_drm_gem_object_put_unlocked;

	drm_gem_object_put_unlocked(&gbo->bo.base);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put_unlocked:
	drm_gem_object_put_unlocked(&gbo->bo.base);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);

/*
 * Helpers for struct ttm_bo_driver
 */

static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}

/**
 * drm_gem_vram_bo_driver_evict_flags() - \
	Implements &struct ttm_bo_driver.evict_flags
 * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
 * @pl: TTM placement information.
 */
void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
					struct ttm_placement *pl)
{
	struct drm_gem_vram_object *gbo;

	/* TTM may pass BOs that are not GEM VRAM BOs. */
	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);

/**
 * drm_gem_vram_bo_driver_verify_access() - \
	Implements &struct ttm_bo_driver.verify_access
 * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
 * @filp: File pointer.
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
					 struct file *filp)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
					  filp->private_data);
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);

/**
 * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
 *
 * Most users of &struct drm_gem_vram_object will also use
 * &struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
 * can be used to connect both.
 */
const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
	.evict_flags = drm_gem_vram_bo_driver_evict_flags,
	.verify_access = drm_gem_vram_bo_driver_verify_access
};
EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
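/*
 * Example: connecting a driver to the VRAM helpers
 *
 * A minimal sketch of how a driver might wire up VRAM MM and the
 * dumb-buffer callbacks. It assumes the drm_vram_helper_alloc_mm()
 * helper from <drm/drm_vram_mm_helper.h> and the .dumb_create and
 * .dumb_map_offset fields of &struct drm_driver; vram_base and
 * vram_size are hypothetical, device-specific values:
 *
 *	static struct drm_driver example_driver = {
 *		...
 *		.dumb_create     = drm_gem_vram_driver_dumb_create,
 *		.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
 *	};
 *
 *	static int example_init_mm(struct drm_device *dev)
 *	{
 *		struct drm_vram_mm *vmm;
 *
 *		vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
 *					       &drm_gem_vram_mm_funcs);
 *		return PTR_ERR_OR_ZERO(vmm);
 *	}
 */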
/*
 * Helpers for struct drm_gem_object_funcs
 */

/**
 * drm_gem_vram_object_free() - \
	Implements &struct drm_gem_object_funcs.free
 * @gem: GEM object. Refers to &struct drm_gem_vram_object.bo.base
 */
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}

/*
 * Helpers for dumb buffers
 */

/**
 * drm_gem_vram_driver_dumb_create() - \
	Implements &struct drm_driver.dumb_create
 * @file: the DRM file
 * @dev: the DRM device
 * @args: the arguments as provided to \
	&struct drm_driver.dumb_create
 *
 * This function requires the driver to use @drm_device.vram_mm for its
 * instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

/**
 * drm_gem_vram_driver_dumb_mmap_offset() - \
	Implements &struct drm_driver.dumb_map_offset
 * @file: DRM file pointer.
 * @dev: DRM device.
 * @handle: GEM handle
 * @offset: Returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
					 struct drm_device *dev,
					 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct drm_gem_vram_object *gbo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return -ENOENT;

	gbo = drm_gem_vram_of_gem(gem);
	*offset = drm_gem_vram_mmap_offset(gbo);

	drm_gem_object_put_unlocked(gem);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);

/*
 * PRIME helpers
 */

/**
 * drm_gem_vram_object_pin() - \
	Implements &struct drm_gem_object_funcs.pin
 * @gem: The GEM object to pin
 *
 * Returns:
 * 0 on success, or
 * a negative errno code otherwise.
 */
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	/* Fbdev console emulation is the use case of these PRIME
	 * helpers. This may involve updating a hardware buffer from
	 * a shadow FB. We pin the buffer to its current location
	 * (either video RAM or system memory) to prevent it from
	 * being relocated during the update operation. If you require
	 * the buffer to be pinned to VRAM, implement a callback that
	 * sets the flags accordingly.
	 */
	return drm_gem_vram_pin(gbo, 0);
}

/**
 * drm_gem_vram_object_unpin() - \
	Implements &struct drm_gem_object_funcs.unpin
 * @gem: The GEM object to unpin
 */
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}

/**
 * drm_gem_vram_object_vmap() - \
	Implements &struct drm_gem_object_funcs.vmap
 * @gem: The GEM object to map
 *
 * Returns:
 * The buffer's virtual address on success, or
 * NULL otherwise.
 */
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	int ret;
	void *base;

	ret = drm_gem_vram_pin(gbo, 0);
	if (ret)
		return NULL;
	base = drm_gem_vram_kmap(gbo, true, NULL);
	if (IS_ERR(base)) {
		drm_gem_vram_unpin(gbo);
		return NULL;
	}
	return base;
}

/**
 * drm_gem_vram_object_vunmap() - \
	Implements &struct drm_gem_object_funcs.vunmap
 * @gem: The GEM object to unmap
 * @vaddr: The mapping's base address
 */
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_kunmap(gbo);
	drm_gem_vram_unpin(gbo);
}

/*
 * GEM object funcs
 */

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free = drm_gem_vram_object_free,
	.pin = drm_gem_vram_object_pin,
	.unpin = drm_gem_vram_object_unpin,
	.vmap = drm_gem_vram_object_vmap,
	.vunmap = drm_gem_vram_object_vunmap
};
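/*
 * Example: pinning PRIME/fbdev buffers to VRAM
 *
 * drm_gem_vram_object_pin() pins the buffer at its current location. As
 * its implementation notes, a driver that needs such buffers in video
 * RAM instead can implement a pin callback that requests VRAM explicitly
 * and hook it into its own &struct drm_gem_object_funcs. A minimal
 * sketch of such a callback; the function name is hypothetical:
 *
 *	static int example_gem_object_pin_vram(struct drm_gem_object *gem)
 *	{
 *		struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
 *
 *		return drm_gem_vram_pin(gbo, TTM_PL_FLAG_VRAM);
 *	}
 */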