/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
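
/**
 * radeon_gem_set_domain - handle a userspace domain request for a GEM object
 *
 * @gobj: GEM object to operate on
 * @rdomain: read domains requested by userspace
 * @wdomain: write domain requested by userspace
 *
 * The write domain, if set, takes precedence over the read domains. Despite
 * its name (see the FIXME below), this currently only waits (up to 30
 * seconds) for the object to become idle when the CPU domain is requested,
 * and rejects migration to VRAM for buffers shared via dma-buf.
 */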
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access; wait for the object to become idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the create and
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}
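
/**
 * radeon_gem_object_close - drop a file's VM mapping of a GEM object
 *
 * @obj: GEM object the handle referred to
 * @file_priv: DRM file the handle is being closed on
 *
 * Counterpart of radeon_gem_object_open(): drops the bo_va reference taken
 * on open and removes the mapping once the last reference is gone.
 */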
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
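
/**
 * radeon_gem_handle_lockup - map a GPU lockup error to a retryable one
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by a GEM operation
 *
 * -EDEADLK signals a GPU lockup; attempt a GPU reset and, if it succeeds,
 * return -EAGAIN so that userspace retries the operation.
 */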
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a GEM object to contain this allocation */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
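
/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_userptr)
 * @filp: DRM file the ioctl arrived on
 *
 * Wraps a page-aligned range of user address space in a GEM object.
 * Write access requires anonymous memory plus an MMU notifier so that
 * the pinned pages can be invalidated when the userspace mapping changes.
 */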
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a GEM object to contain this allocation */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	/* userptr BOs must not be mapped through the GEM mmap offset */
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
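
/**
 * radeon_gem_busy_ioctl - poll a GEM object for pending GPU access
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_busy)
 * @filp: DRM file the ioctl arrived on
 *
 * Returns -EBUSY if any fence on the object's reservation is still
 * unsignaled, 0 otherwise, and reports the current placement domain
 * back to userspace.
 */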
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
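
/**
 * radeon_gem_va_ioctl - map or unmap a GEM object in a per-file GPU VM
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_va)
 * @filp: DRM file the ioctl arrived on
 *
 * Validates the requested offset and flags, then maps (RADEON_VA_MAP) or
 * unmaps (RADEON_VA_UNMAP) the buffer in the file's virtual memory space
 * and updates the page tables. The result is reported in args->operation.
 */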
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can start using these fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}
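
/**
 * radeon_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * @file_priv: DRM file the buffer handle belongs to
 * @dev: DRM device
 * @args: width, height and bpp in; pitch, size and handle out
 *
 * Allocates a page-aligned, pitch-aligned VRAM buffer and returns a GEM
 * handle for it.
 */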
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}