// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
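
/* A rough sketch of the lifecycle described above, for orientation
 * only (all names refer to functions defined later in this file, and
 * the flow is a simplified summary rather than an exhaustive call
 * graph):
 *
 *	bo = vc4_bo_create(dev, size, false, VC4_BO_TYPE_KERNEL);
 *		-> vc4_bo_get_from_cache()	// fast path: cache hit
 *		-> drm_gem_cma_create()		// slow path: CMA alloc
 *
 *	drm_gem_object_put(&bo->base.base);
 *		-> vc4_free_object()		// on the last reference
 *			-> BO parked on bo_cache.size_list/time_list
 *			-> freed ~1s later by vc4_bo_cache_free_old()
 */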

#include <linux/dma-buf.h>

#include <drm/drm_fourcc.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const struct drm_gem_object_funcs vc4_gem_object_funcs;

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

/* BO cache buckets are indexed by page count, so a BO of N pages lives
 * in bucket N - 1. Sizes are always page-aligned by the time they get
 * here.
 */
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	drm_gem_cma_free(&bo->base);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}
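
/* A worked example of the bucket math above (illustrative only): a
 * page-aligned 64KiB BO on a 4KiB-page system spans 16 pages, so
 * bo_page_index() returns 15 and the BO is cached on size_list[15].
 * If size_list_size were 8, vc4_get_cache_list_for_size() would first
 * grow the array to max(8 * 2, 15 + 1) = 16 entries.
 */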

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list, which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}
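
/* The helpers above back the VC4_GEM_MADVISE ioctl, which is
 * implemented in vc4_gem.c. A rough sketch of the intended userspace
 * flow, for orientation only (reupload_contents() is a hypothetical
 * application helper):
 *
 *	struct drm_vc4_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = VC4_MADV_DONTNEED,	// BO may now be purged
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
 *	...
 *	arg.madv = VC4_MADV_WILLNEED;		// take the BO back
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		reupload_contents(bo);		// purged in the meantime
 */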

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->base.base.funcs = &vc4_gem_object_funcs;

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal, since we purge the whole userspace
		 * BO cache, forcing users that want to re-use a BO to
		 * restore its content first.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->base.dev);

		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}
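
/* A minimal sketch of in-kernel usage of vc4_bo_create(), assuming a
 * caller that needs a temporary buffer ("my_size" is a placeholder;
 * the error handling mirrors vc4_dumb_create() below):
 *
 *	struct vc4_bo *bo;
 *
 *	bo = vc4_bo_create(dev, my_size, false, VC4_BO_TYPE_KERNEL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	// ... access the buffer via bo->base.vaddr / bo->base.paddr ...
 *
 *	drm_gem_object_put(&bo->base.base);	// may park it in the BO cache
 */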

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}
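
/* Dumb BOs are reached through the generic KMS ioctls rather than
 * anything vc4-specific. A rough userspace sequence (illustrative
 * only):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	// create.handle, create.pitch and create.size are filled in by
 *	// vc4_dumb_create() above, with pitch and size rounded up.
 */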

static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
static void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}
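
/* A minimal sketch of the retain/release pattern the two helpers
 * above implement (this mirrors how vc4_prime_export() below uses
 * them; do_hw_access() is a placeholder for real work):
 *
 *	ret = vc4_bo_inc_usecnt(bo);	// fails on purged/purgeable BOs
 *	if (ret)
 *		return ret;
 *	do_hw_access(bo);
 *	vc4_bo_dec_usecnt(bo);		// may re-add BO to purgeable pool
 */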

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when userspace accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
					"purgeable" : "purged");
		return -EINVAL;
	}

	return drm_gem_cma_mmap(&bo->base, vma);
}

static const struct vm_operations_struct vc4_vm_ops = {
	.fault = vc4_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
	.free = vc4_free_object,
	.export = vc4_prime_export,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = vc4_gem_object_mmap,
	.vm_ops = &vc4_vm_ops,
};

static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/*
	 * Don't allow unzeroed allocations here: BOs coming from the
	 * cache may still hold another user's data, so they must be
	 * cleared before being handed out to avoid leaking data
	 * between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put(gem_obj);
	return 0;
}
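
/* Putting the two ioctls above together, the usual userspace path for
 * getting a CPU mapping of a V3D BO looks roughly like this
 * (illustrative only):
 *
 *	struct drm_vc4_create_bo create = { .size = bo_size };
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *
 *	struct drm_vc4_mmap_bo map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */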

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, which may contain stale data if
	 * the BO was allocated (unzeroed) from the BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users do things like mmap the shader BO before
	 * validation completes.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put(gem_obj);

	return 0;
}
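
/* A rough sketch of how a producer and a consumer would use the two
 * tiling ioctls above across a dmabuf share (illustrative only):
 *
 *	// producer, after rendering a T-tiled buffer:
 *	struct drm_vc4_set_tiling set = {
 *		.handle = handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &set);
 *
 *	// consumer, after importing the dmabuf:
 *	struct drm_vc4_get_tiling get = { .handle = imported_handle };
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get);
 *	// get.modifier now matches what the producer set
 */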

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(gem_obj);

	return ret;
}