/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

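/**
 * radeon_gem_object_create - create a GEM object backed by a radeon BO
 * @rdev: radeon device object
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred initial placement (RADEON_GEM_DOMAIN_*)
 * @flags: RADEON_GEM_* creation flags
 * @kernel: whether this is a kernel-internal allocation
 * @obj: filled with the new GEM object on success
 *
 * A VRAM-only request that fails is retried with VRAM|GTT as the
 * placement before the error is reported.
 * Returns 0 on success, negative error code on failure.
 */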
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

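/**
 * radeon_gem_object_open - GEM open callback for radeon BOs
 * @obj: GEM object being opened
 * @file_priv: DRM file this object is opened for
 *
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases. On chips with per-process VMs (CAYMAN and later, with
 * acceleration working) this looks up the bo_va for the BO in this file's
 * VM, creating it on first open and bumping its reference count otherwise.
 */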
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

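/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: ioctl args (struct drm_radeon_gem_userptr)
 * @filp: DRM file the handle is created for
 *
 * The address and size must be page aligned and the flags must be a
 * supported combination: read-only mappings require an R600 or newer
 * ASIC, while writable mappings are only accepted together with
 * ANONONLY and REGISTER so that an MMU notifier tracks the pages.
 */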
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

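/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO handle
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: filled with the mmap offset on success
 *
 * BOs backed by user pages (userptr) cannot be mapped this way and are
 * rejected with -EPERM.
 */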
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

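/**
 * radeon_gem_wait_idle_ioctl - wait for a BO to become idle
 * @dev: DRM device
 * @data: ioctl args (struct drm_radeon_gem_wait_idle)
 * @filp: DRM file the handle belongs to
 *
 * Blocks for up to 30 seconds on all fences attached to the BO's
 * reservation object and returns -EBUSY on timeout. If the BO still
 * lives in VRAM, the HDP cache is flushed afterwards, presumably so
 * that subsequent CPU access sees up-to-date data.
 */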
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

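/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-process virtual address space
 * @dev: DRM device
 * @data: ioctl args (struct drm_radeon_gem_va)
 * @filp: DRM file the handle belongs to
 *
 * Validates the requested operation, offset and flags, then maps
 * (RADEON_VA_MAP) or unmaps (RADEON_VA_UNMAP) the BO at the given VM
 * offset and updates the page tables. The result is reported back in
 * args->operation as a RADEON_VA_RESULT_* code.
 */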
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; thus,
	 * moving forward, we can use those fields without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

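/**
 * radeon_mode_dumb_create - create a dumb buffer suitable for scanout
 * @file_priv: DRM file the handle is created for
 * @dev: DRM device
 * @args: width, height and bpp in; pitch, size and handle out
 *
 * Computes an aligned pitch and page-aligned size, then allocates a
 * VRAM BO and returns a GEM handle for it.
 */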
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}