/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&robj->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return 0;
}

int amdgpu_gem_init(struct amdgpu_device *adev)
{
	INIT_LIST_HEAD(&adev->gem.objects);
	return 0;
}

void amdgpu_gem_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_force_delete(adev);
}
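
/*
 * Illustrative sketch (editor's addition, not driver code): how an
 * in-kernel caller might use amdgpu_gem_object_create() above to get a
 * page-sized GTT buffer, with error handling trimmed to the essentials.
 *
 *	struct drm_gem_object *gobj;
 *	int r;
 *
 *	r = amdgpu_gem_object_create(adev, PAGE_SIZE, 0,
 *				     AMDGPU_GEM_DOMAIN_GTT, 0,
 *				     false, &gobj);
 *	if (r)
 *		return r;
 */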

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;
	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	amdgpu_bo_unreserve(rbo);
}

static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}
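
/*
 * Illustrative userspace sketch (editor's addition; assumes libdrm's
 * drmCommandWriteRead() and the uapi structs from amdgpu_drm.h, with fd
 * an open device node): asking the ioctl above for a 1 MiB VRAM buffer.
 *
 *	union drm_amdgpu_gem_create args = {0};
 *	uint32_t handle;
 *
 *	args.in.bo_size = 1 << 20;
 *	args.in.alignment = 0;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
 *	args.in.domain_flags = 0;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		handle = args.out.handle;
 */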

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;
	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
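
/*
 * Illustrative userspace sketch (editor's addition, assumptions as
 * above; handle and bo_size are caller-provided): mapping a BO through
 * the fake offset returned by the mmap ioctl.
 *
 *	union drm_amdgpu_gem_mmap args = {0};
 *	void *cpu;
 *
 *	args.in.handle = handle;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_GEM_MMAP, &args, sizeof(args));
 *	cpu = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.out.addr_ptr);
 */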

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
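
/*
 * Worked example (editor's addition): with HZ == 250, a caller passing
 * an absolute timeout of ktime_get_ns() + NSEC_PER_SEC through
 * amdgpu_gem_timeout() gets roughly nsecs_to_jiffies(1000000000) == 250
 * jiffies; an absolute timeout already in the past yields 0, and a
 * negative (i.e. "infinite") value maps to MAX_SCHEDULE_TIMEOUT.
 */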

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation that was requested
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_print;

	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	list_for_each_entry(entry, &duplicates, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_print:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
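
/*
 * Illustrative userspace sketch (editor's addition, assumptions as
 * above; handle, va and bo_size are caller-provided): mapping a BO at a
 * GPU virtual address through the ioctl that follows.
 *
 *	struct drm_amdgpu_gem_va args = {0};
 *
 *	args.handle = handle;
 *	args.operation = AMDGPU_VA_OP_MAP;
 *	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *	args.va_address = va;
 *	args.offset_in_bo = 0;
 *	args.map_size = bo_size;
 *	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &args, sizeof(args));
 */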

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_amdgpu_bo(gobj);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	tv.bo = &rbo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	if (args->operation == AMDGPU_VA_OP_MAP) {
		tv_pd.bo = &fpriv->vm.page_directory->tbo;
		tv_pd.shared = true;
		list_add(&tv_pd.head, &list);
	}
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	ttm_eu_backoff_reservation(&ticket, &list);
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
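
/*
 * Illustrative userspace sketch (editor's addition, assumptions as
 * above): querying a BO's creation parameters through
 * AMDGPU_GEM_OP_GET_GEM_CREATE_INFO in the ioctl above.
 *
 *	struct drm_amdgpu_gem_create_in info;
 *	struct drm_amdgpu_gem_op args = {0};
 *
 *	args.handle = handle;
 *	args.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
 *	args.value = (uintptr_t)&info;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_GEM_OP, &args, sizeof(args));
 */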

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}
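
/*
 * Sample of what the amdgpu_gem_info debugfs file above prints, one line
 * per BO (editor's illustration; the values are made up):
 *
 *	bo[0x00000000]     1024kB        1MB VRAM pid     1234
 *	bo[0x00000001]      256kB        0MB  GTT pid     1234
 */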