/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS |
				AMDGPU_GEM_DOMAIN_GWS |
				AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the
		 * gtt to handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			/* fall back to GTT if the VRAM allocation failed */
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&robj->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return 0;
}

int amdgpu_gem_init(struct amdgpu_device *adev)
{
	INIT_LIST_HEAD(&adev->gem.objects);
	return 0;
}

void amdgpu_gem_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_force_delete(adev);
}
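
/*
 * A minimal sketch (for illustration only, mirroring the ioctl paths
 * below) of how a GEM object is allocated and wrapped in a userspace
 * handle. Error handling is elided and "filp" is assumed to be a valid
 * struct drm_file:
 *
 *	struct drm_gem_object *gobj;
 *	uint32_t handle;
 *
 *	if (!amdgpu_gem_object_create(adev, PAGE_SIZE, 0,
 *				      AMDGPU_GEM_DOMAIN_GTT, 0,
 *				      false, &gobj)) {
 *		drm_gem_handle_create(filp, gobj, &handle);
 *		drm_gem_object_unreference_unlocked(gobj);
 *	}
 *
 * Note that the handle then owns the reference taken at creation, which
 * is why the local reference is dropped right away.
 */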
/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		mutex_unlock(&vm->mutex);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(rbo);
	mutex_unlock(&vm->mutex);
	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		mutex_unlock(&vm->mutex);
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0)
			amdgpu_vm_bo_rmv(adev, bo_va);
	}
	amdgpu_bo_unreserve(rbo);
	mutex_unlock(&vm->mutex);
}

/*
 * If the wait hit a GPU lockup (-EDEADLK), reset the GPU and return
 * -EAGAIN so the caller retries the operation.
 */
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
				AMDGPU_GEM_DOMAIN_GWS |
				AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
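
/*
 * Illustrative userspace sketch (not part of this file): assuming a valid
 * DRM fd, a 4 KiB GTT buffer could be requested through libdrm's
 * drmCommandWriteRead() roughly like this:
 *
 *	union drm_amdgpu_gem_create req = {};
 *
 *	req.in.bo_size = 4096;
 *	req.in.alignment = 4096;
 *	req.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
 *				&req, sizeof(req)) == 0)
 *		handle = req.out.handle;
 */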
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	/* the address and size must be page aligned */
	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	/* if we want to write to it we must require anonymous
	   memory and install an MMU notifier */
	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)))
		return -EACCES;

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	/* userptr BOs and BOs without CPU access must not be mmapped */
	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
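
/*
 * Illustrative userspace sketch (not part of this file): wrapping a
 * page-aligned anonymous allocation in a userptr BO via the userptr ioctl
 * above, registering an MMU notifier so the GPU may also write to it:
 *
 *	struct drm_amdgpu_gem_userptr req = {};
 *	void *ptr = aligned_alloc(4096, 4096);
 *
 *	req.addr = (uintptr_t)ptr;
 *	req.size = 4096;
 *	req.flags = AMDGPU_GEM_USERPTR_ANONONLY |
 *		    AMDGPU_GEM_USERPTR_REGISTER;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_USERPTR,
 *				&req, sizeof(req)) == 0)
 *		handle = req.handle;
 */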
/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled,
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
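
/*
 * Illustrative userspace sketch (not part of this file): waiting on a BO
 * through the wait-idle ioctl above. Per amdgpu_gem_timeout(), the timeout
 * is an absolute CLOCK_MONOTONIC deadline in ns, and a negative value
 * means "wait forever"; per the status assignment above, out.status == 1
 * means the BO was still busy when the wait ended:
 *
 *	union drm_amdgpu_gem_wait_idle req = {};
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	req.in.handle = handle;
 *	req.in.timeout = ts.tv_sec * 1000000000ull + ts.tv_nsec +
 *			 100000000;	// deadline 100 ms from now
 *	drmCommandWriteRead(fd, DRM_AMDGPU_GEM_WAIT_IDLE,
 *			    &req, sizeof(req));
 */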
/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: VA operation (map or unmap) that was requested
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
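
/*
 * Illustrative userspace sketch (not part of this file): mapping a BO at a
 * GPU virtual address through amdgpu_gem_va_ioctl() below, assuming "va"
 * and "bo_size" come from a userspace VA allocator and are page aligned:
 *
 *	struct drm_amdgpu_gem_va req = {};
 *
 *	req.handle = handle;
 *	req.operation = AMDGPU_VA_OP_MAP;
 *	req.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *	req.va_address = va;		// must be above the reserved area
 *	req.offset_in_bo = 0;
 *	req.map_size = bo_size;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_GEM_VA, &req, sizeof(req));
 */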
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if (args->flags & invalid_flags) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	mutex_lock(&fpriv->vm.mutex);
	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		mutex_unlock(&fpriv->vm.mutex);
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		amdgpu_bo_unreserve(rbo);
		mutex_unlock(&fpriv->vm.mutex);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}

	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
	mutex_unlock(&fpriv->vm.mutex);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->initial_domain;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
						      AMDGPU_GEM_DOMAIN_GTT |
						      AMDGPU_GEM_DOMAIN_CPU);
		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) *
		      ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}
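
/*
 * Worked example for the pitch/size math above (illustrative numbers; the
 * actual pitch alignment is hardware dependent): a 1920x1080, 32 bpp dumb
 * buffer with a 64-pixel pitch alignment gives
 *
 *	pitch = 1920 * ((32 + 1) / 8) = 1920 * 4 = 7680 bytes
 *	size  = 7680 * 1080 = 8294400 bytes
 *
 * which ALIGN(size, PAGE_SIZE) leaves unchanged here, since 8294400 is
 * already an exact multiple of the 4096-byte page size (2025 pages).
 */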
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10,
			   amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}
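
/*
 * Illustrative usage note (not part of this file): with debugfs mounted,
 * the per-BO list printed above can be read from userspace, e.g. for the
 * first DRM minor:
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_gem_info
 */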