/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;
	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%ld)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}

static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		vma->vm_flags &= ~VM_MAYWRITE;

	return drm_gem_ttm_mmap(obj, vma);
}

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
			  size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put(gobj);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(dev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

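	/* Gather the GEM object (when the operation refers to one) and the VM
	 * page directory into one list so both can be reserved together
	 * before the mapping is changed.
	 */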
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}

static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}