/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}
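
/**
 * radeon_gem_object_create - create a radeon BO backed GEM object
 *
 * @rdev: radeon_device pointer
 * @size: object size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: preferred initial placement (VRAM, GTT or CPU)
 * @flags: RADEON_GEM_* creation flags
 * @kernel: true if the object is for kernel internal use
 * @obj: returned GEM object, NULL on failure
 *
 * The size is capped at the unpinned GTT size because the GTT is used to
 * migrate buffers between VRAM and the system pool.  A failed VRAM
 * allocation is transparently retried with GTT added to the allowed
 * domains.  Returns 0 for success or a negative error code on failure.
 */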
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without a domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to go idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the create
 * and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
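
/*
 * For reference, userspace typically reaches the ioctl above through
 * libdrm.  A minimal sketch, assuming an already-open device fd (the
 * use_handle() call is a placeholder, not a real API):
 *
 *	struct drm_radeon_gem_create args = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		use_handle(args.handle);
 */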

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
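
/*
 * Common backend for the dumb-buffer and GEM mmap paths: translate a GEM
 * handle into the fake offset userspace passes to mmap().  Userptr BOs are
 * refused because their pages already live in the process address space.
 */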
static int radeon_mode_mmap(struct drm_file *filp,
			    struct drm_device *dev,
			    uint32_t handle, bool dumb,
			    uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}

	/*
	 * We don't allow dumb mmaps on objects created using another
	 * interface.
	 */
	WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
		  "Illegal dumb map of GPU buffer.\n");

	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	return radeon_mode_mmap(filp, dev, handle, true, offset_p);
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_mmap(filp, dev, args->handle, false,
				&args->addr_ptr);
}
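
/*
 * radeon_gem_busy_ioctl does a non-blocking check of whether a BO still
 * has GPU work pending against it, and reports the domain the buffer
 * currently resides in back to userspace.
 */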
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
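
/**
 * radeon_gem_va_ioctl - map/unmap a BO in the per-file virtual address space
 *
 * @dev: drm device
 * @data: drm_radeon_gem_va arguments from userspace
 * @filp: drm file the request came from
 *
 * Validates the requested operation, offset and flags, resolves the handle
 * to a bo_va in this file's VM and maps or unmaps it at the given offset.
 * The outcome is reported back through args->operation.
 */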
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can start using these fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		/* drop the reservation taken above before bailing out */
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
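
/**
 * radeon_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * @file_priv: drm file the request came from
 * @dev: drm device
 * @args: width, height and bpp in; pitch, size and handle out
 *
 * Computes an aligned pitch from the requested width and bpp, rounds the
 * resulting size up to a full page and allocates a VRAM BO for it.
 */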
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	gobj->dumb = true;
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}