Lines Matching +full:protect +full:- +full:exec
32 #include <linux/dma-buf.h>
50 struct ttm_buffer_object *bo = vmf->vma->vm_private_data; in amdgpu_gem_fault()
51 struct drm_device *ddev = bo->base.dev; in amdgpu_gem_fault()
66 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in amdgpu_gem_fault()
71 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in amdgpu_gem_fault()
73 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in amdgpu_gem_fault()
77 dma_resv_unlock(bo->base.resv); in amdgpu_gem_fault()
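
The matches above are fragments of the TTM CPU page-fault handler. A minimal sketch of how they connect; the code between the matched lines is assumed from the standard TTM fault pattern rather than taken verbatim from this file:

	static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
	{
		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
		struct drm_device *ddev = bo->base.dev;
		vm_fault_t ret;
		int idx;

		/* take bo->base.resv before touching the BO */
		ret = ttm_bo_vm_reserve(bo, vmf);
		if (ret)
			return ret;

		if (drm_dev_enter(ddev, &idx)) {
			ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
						       TTM_BO_VM_NUM_PREFAULT);
			drm_dev_exit(idx);
		} else {
			/* device unplugged: hand out a dummy page instead */
			ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
		}

		/* VM_FAULT_RETRY without NOWAIT means the helper already
		 * dropped the reservation itself, so don't unlock twice */
		if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
			return ret;

		dma_resv_unlock(bo->base.resv);
		return ret;
	}
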
126 bo = &ubo->bo; in amdgpu_gem_object_create()
127 *obj = &bo->tbo.base; in amdgpu_gem_object_create()
128 (*obj)->funcs = &amdgpu_gem_object_funcs; in amdgpu_gem_object_create()
138 mutex_lock(&ddev->filelist_mutex); in amdgpu_gem_force_release()
140 list_for_each_entry(file, &ddev->filelist, lhead) { in amdgpu_gem_force_release()
145 spin_lock(&file->table_lock); in amdgpu_gem_force_release()
146 idr_for_each_entry(&file->object_idr, gobj, handle) { in amdgpu_gem_force_release()
150 idr_destroy(&file->object_idr); in amdgpu_gem_force_release()
151 spin_unlock(&file->table_lock); in amdgpu_gem_force_release()
154 mutex_unlock(&ddev->filelist_mutex); in amdgpu_gem_force_release()
165 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); in amdgpu_gem_object_open()
166 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; in amdgpu_gem_object_open()
167 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_gem_object_open()
172 mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); in amdgpu_gem_object_open()
173 if (mm && mm != current->mm) in amdgpu_gem_object_open()
174 return -EPERM; in amdgpu_gem_object_open()
176 if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && in amdgpu_gem_object_open()
177 abo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_gem_object_open()
178 return -EPERM; in amdgpu_gem_object_open()
188 ++bo_va->ref_count; in amdgpu_gem_object_open()
197 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gem_object_close()
198 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; in amdgpu_gem_object_close()
199 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_gem_object_close()
203 struct drm_exec exec; in amdgpu_gem_object_close() local
206 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES); in amdgpu_gem_object_close()
207 drm_exec_until_all_locked(&exec) { in amdgpu_gem_object_close()
208 r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1); in amdgpu_gem_object_close()
209 drm_exec_retry_on_contention(&exec); in amdgpu_gem_object_close()
213 r = amdgpu_vm_lock_pd(vm, &exec, 0); in amdgpu_gem_object_close()
214 drm_exec_retry_on_contention(&exec); in amdgpu_gem_object_close()
220 if (!bo_va || --bo_va->ref_count) in amdgpu_gem_object_close()
229 dev_err(adev->dev, "failed to clear page " in amdgpu_gem_object_close()
239 dev_err(adev->dev, "leaking bo va (%ld)\n", r); in amdgpu_gem_object_close()
240 drm_exec_fini(&exec); in amdgpu_gem_object_close()
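
These matches show the drm_exec pattern this file uses to take multiple reservation locks without deadlocking: drm_exec_until_all_locked() re-runs its body whenever drm_exec_retry_on_contention() detects a ww-mutex back-off, and drm_exec_fini() drops everything that was acquired. A condensed sketch of the close path, with the error labels assumed:

	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(&exec) {
		/* lock the GEM object, reserving one fence slot */
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		/* lock the VM's page directory in the same context */
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	/* ... tear down the bo_va mapping under the locks ... */

out_unlock:
	drm_exec_fini(&exec);

Because the loop body can execute more than once, everything inside it has to be safe to repeat.
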
247 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) in amdgpu_gem_object_mmap()
248 return -EPERM; in amdgpu_gem_object_mmap()
249 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) in amdgpu_gem_object_mmap()
250 return -EPERM; in amdgpu_gem_object_mmap()
257 if (is_cow_mapping(vma->vm_flags) && in amdgpu_gem_object_mmap()
258 !(vma->vm_flags & VM_ACCESS_FLAGS)) in amdgpu_gem_object_mmap()
282 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_gem_create_ioctl()
283 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_gem_create_ioctl()
285 uint64_t flags = args->in.domain_flags; in amdgpu_gem_create_ioctl()
286 uint64_t size = args->in.bo_size; in amdgpu_gem_create_ioctl()
293 if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL) in amdgpu_gem_create_ioctl()
294 return -EINVAL; in amdgpu_gem_create_ioctl()
305 return -EINVAL; in amdgpu_gem_create_ioctl()
308 if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK) in amdgpu_gem_create_ioctl()
309 return -EINVAL; in amdgpu_gem_create_ioctl()
313 return -EINVAL; in amdgpu_gem_create_ioctl()
317 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | in amdgpu_gem_create_ioctl()
323 DRM_ERROR("GDS bo cannot be per-vm-bo\n"); in amdgpu_gem_create_ioctl()
324 return -EINVAL; in amdgpu_gem_create_ioctl()
330 r = amdgpu_bo_reserve(vm->root.bo, false); in amdgpu_gem_create_ioctl()
334 resv = vm->root.bo->tbo.base.resv; in amdgpu_gem_create_ioctl()
337 initial_domain = (u32)(0xffffffff & args->in.domains); in amdgpu_gem_create_ioctl()
339 r = amdgpu_gem_object_create(adev, size, args->in.alignment, in amdgpu_gem_create_ioctl()
341 flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1); in amdgpu_gem_create_ioctl()
342 if (r && r != -ERESTARTSYS) { in amdgpu_gem_create_ioctl()
353 size, initial_domain, args->in.alignment, r); in amdgpu_gem_create_ioctl()
360 abo->parent = amdgpu_bo_ref(vm->root.bo); in amdgpu_gem_create_ioctl()
362 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_gem_create_ioctl()
368 /* drop reference from allocate - handle holds it now */ in amdgpu_gem_create_ioctl()
374 args->out.handle = handle; in amdgpu_gem_create_ioctl()
384 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_gem_userptr_ioctl()
391 args->addr = untagged_addr(args->addr); in amdgpu_gem_userptr_ioctl()
393 if (offset_in_page(args->addr | args->size)) in amdgpu_gem_userptr_ioctl()
394 return -EINVAL; in amdgpu_gem_userptr_ioctl()
397 if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY | in amdgpu_gem_userptr_ioctl()
400 return -EINVAL; in amdgpu_gem_userptr_ioctl()
402 if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && in amdgpu_gem_userptr_ioctl()
403 !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { in amdgpu_gem_userptr_ioctl()
406 return -EACCES; in amdgpu_gem_userptr_ioctl()
410 r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, in amdgpu_gem_userptr_ioctl()
411 0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); in amdgpu_gem_userptr_ioctl()
416 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_gem_userptr_ioctl()
417 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_gem_userptr_ioctl()
418 r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags); in amdgpu_gem_userptr_ioctl()
422 r = amdgpu_hmm_register(bo, args->addr); in amdgpu_gem_userptr_ioctl()
426 if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { in amdgpu_gem_userptr_ioctl()
427 r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, in amdgpu_gem_userptr_ioctl()
437 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_gem_userptr_ioctl()
447 args->handle = handle; in amdgpu_gem_userptr_ioctl()
450 if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) in amdgpu_gem_userptr_ioctl()
451 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); in amdgpu_gem_userptr_ioctl()
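
Read together, the AMDGPU_GEM_USERPTR_VALIDATE matches outline the optional pre-population step: the user pages are grabbed through HMM, the BO is validated into GTT while they are held, and amdgpu_ttm_tt_get_user_pages_done() releases the range again on both the success and the error path.
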
468 return -ENOENT; in amdgpu_mode_dumb_mmap()
471 if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || in amdgpu_mode_dumb_mmap()
472 (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { in amdgpu_mode_dumb_mmap()
474 return -EPERM; in amdgpu_mode_dumb_mmap()
485 uint32_t handle = args->in.handle; in amdgpu_gem_mmap_ioctl()
488 return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr); in amdgpu_gem_mmap_ioctl()
492 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
512 /* clamp timeout to avoid unsigned -> signed overflow */ in amdgpu_gem_timeout()
514 return MAX_SCHEDULE_TIMEOUT - 1; in amdgpu_gem_timeout()
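
amdgpu_gem_timeout() turns the ioctl's absolute nanosecond deadline into a relative jiffies count. A sketch of the clamping the match shows; remaining_ns is an illustrative name for the already-computed time left, not a variable from this file:

	/* a "negative" deadline is treated as wait-forever */
	if ((int64_t)timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout_jiffies = nsecs_to_jiffies(remaining_ns);

	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
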
525 uint32_t handle = args->in.handle; in amdgpu_gem_wait_idle_ioctl()
526 unsigned long timeout = amdgpu_gem_timeout(args->in.timeout); in amdgpu_gem_wait_idle_ioctl()
532 return -ENOENT; in amdgpu_gem_wait_idle_ioctl()
535 ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, in amdgpu_gem_wait_idle_ioctl()
544 args->out.status = (ret == 0); in amdgpu_gem_wait_idle_ioctl()
558 int r = -1; in amdgpu_gem_metadata_ioctl()
560 DRM_DEBUG("%d\n", args->handle); in amdgpu_gem_metadata_ioctl()
561 gobj = drm_gem_object_lookup(filp, args->handle); in amdgpu_gem_metadata_ioctl()
563 return -ENOENT; in amdgpu_gem_metadata_ioctl()
570 if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) { in amdgpu_gem_metadata_ioctl()
571 amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info); in amdgpu_gem_metadata_ioctl()
572 r = amdgpu_bo_get_metadata(robj, args->data.data, in amdgpu_gem_metadata_ioctl()
573 sizeof(args->data.data), in amdgpu_gem_metadata_ioctl()
574 &args->data.data_size_bytes, in amdgpu_gem_metadata_ioctl()
575 &args->data.flags); in amdgpu_gem_metadata_ioctl()
576 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { in amdgpu_gem_metadata_ioctl()
577 if (args->data.data_size_bytes > sizeof(args->data.data)) { in amdgpu_gem_metadata_ioctl()
578 r = -EINVAL; in amdgpu_gem_metadata_ioctl()
581 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); in amdgpu_gem_metadata_ioctl()
583 r = amdgpu_bo_set_metadata(robj, args->data.data, in amdgpu_gem_metadata_ioctl()
584 args->data.data_size_bytes, in amdgpu_gem_metadata_ioctl()
585 args->data.flags); in amdgpu_gem_metadata_ioctl()
596  * amdgpu_gem_va_update_vm - update the bo_va in its VM
630 if (r && r != -ERESTARTSYS) in amdgpu_gem_va_update_vm()
635 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
657 if (adev->gmc.gmc_funcs->map_mtype) in amdgpu_gem_va_map_flags()
677 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_gem_va_ioctl()
680 struct drm_exec exec; in amdgpu_gem_va_ioctl() local
685 if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { in amdgpu_gem_va_ioctl()
686 dev_dbg(dev->dev, in amdgpu_gem_va_ioctl()
688 args->va_address, AMDGPU_VA_RESERVED_SIZE); in amdgpu_gem_va_ioctl()
689 return -EINVAL; in amdgpu_gem_va_ioctl()
692 if (args->va_address >= AMDGPU_GMC_HOLE_START && in amdgpu_gem_va_ioctl()
693 args->va_address < AMDGPU_GMC_HOLE_END) { in amdgpu_gem_va_ioctl()
694 dev_dbg(dev->dev, in amdgpu_gem_va_ioctl()
695 "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n", in amdgpu_gem_va_ioctl()
696 args->va_address, AMDGPU_GMC_HOLE_START, in amdgpu_gem_va_ioctl()
698 return -EINVAL; in amdgpu_gem_va_ioctl()
701 args->va_address &= AMDGPU_GMC_HOLE_MASK; in amdgpu_gem_va_ioctl()
703 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; in amdgpu_gem_va_ioctl()
704 vm_size -= AMDGPU_VA_RESERVED_SIZE; in amdgpu_gem_va_ioctl()
705 if (args->va_address + args->map_size > vm_size) { in amdgpu_gem_va_ioctl()
706 dev_dbg(dev->dev, in amdgpu_gem_va_ioctl()
708 args->va_address + args->map_size, vm_size); in amdgpu_gem_va_ioctl()
709 return -EINVAL; in amdgpu_gem_va_ioctl()
712 if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { in amdgpu_gem_va_ioctl()
713 dev_dbg(dev->dev, "invalid flags combination 0x%08X\n", in amdgpu_gem_va_ioctl()
714 args->flags); in amdgpu_gem_va_ioctl()
715 return -EINVAL; in amdgpu_gem_va_ioctl()
718 switch (args->operation) { in amdgpu_gem_va_ioctl()
725 dev_dbg(dev->dev, "unsupported operation %d\n", in amdgpu_gem_va_ioctl()
726 args->operation); in amdgpu_gem_va_ioctl()
727 return -EINVAL; in amdgpu_gem_va_ioctl()
730 if ((args->operation != AMDGPU_VA_OP_CLEAR) && in amdgpu_gem_va_ioctl()
731 !(args->flags & AMDGPU_VM_PAGE_PRT)) { in amdgpu_gem_va_ioctl()
732 gobj = drm_gem_object_lookup(filp, args->handle); in amdgpu_gem_va_ioctl()
734 return -ENOENT; in amdgpu_gem_va_ioctl()
741 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | in amdgpu_gem_va_ioctl()
743 drm_exec_until_all_locked(&exec) { in amdgpu_gem_va_ioctl()
745 r = drm_exec_lock_obj(&exec, gobj); in amdgpu_gem_va_ioctl()
746 drm_exec_retry_on_contention(&exec); in amdgpu_gem_va_ioctl()
751 r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2); in amdgpu_gem_va_ioctl()
752 drm_exec_retry_on_contention(&exec); in amdgpu_gem_va_ioctl()
758 bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); in amdgpu_gem_va_ioctl()
760 r = -ENOENT; in amdgpu_gem_va_ioctl()
763 } else if (args->operation != AMDGPU_VA_OP_CLEAR) { in amdgpu_gem_va_ioctl()
764 bo_va = fpriv->prt_va; in amdgpu_gem_va_ioctl()
769 switch (args->operation) { in amdgpu_gem_va_ioctl()
771 va_flags = amdgpu_gem_va_map_flags(adev, args->flags); in amdgpu_gem_va_ioctl()
772 r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, in amdgpu_gem_va_ioctl()
773 args->offset_in_bo, args->map_size, in amdgpu_gem_va_ioctl()
777 r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address); in amdgpu_gem_va_ioctl()
781 r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm, in amdgpu_gem_va_ioctl()
782 args->va_address, in amdgpu_gem_va_ioctl()
783 args->map_size); in amdgpu_gem_va_ioctl()
786 va_flags = amdgpu_gem_va_map_flags(adev, args->flags); in amdgpu_gem_va_ioctl()
787 r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, in amdgpu_gem_va_ioctl()
788 args->offset_in_bo, args->map_size, in amdgpu_gem_va_ioctl()
794 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) in amdgpu_gem_va_ioctl()
795 amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, in amdgpu_gem_va_ioctl()
796 args->operation); in amdgpu_gem_va_ioctl()
799 drm_exec_fini(&exec); in amdgpu_gem_va_ioctl()
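
Note that this path initializes its drm_exec context with DRM_EXEC_INTERRUPTIBLE_WAIT (the rest of the flag mask is elided at the line break), so a userspace thread blocked on contended locks can be interrupted by a signal, unlike the uninterruptible close path above; drm_exec_fini() again releases every lock on every exit path.
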
814 gobj = drm_gem_object_lookup(filp, args->handle); in amdgpu_gem_op_ioctl()
816 return -ENOENT; in amdgpu_gem_op_ioctl()
824 switch (args->op) { in amdgpu_gem_op_ioctl()
827 void __user *out = u64_to_user_ptr(args->value); in amdgpu_gem_op_ioctl()
829 info.bo_size = robj->tbo.base.size; in amdgpu_gem_op_ioctl()
830 info.alignment = robj->tbo.page_alignment << PAGE_SHIFT; in amdgpu_gem_op_ioctl()
831 info.domains = robj->preferred_domains; in amdgpu_gem_op_ioctl()
832 info.domain_flags = robj->flags; in amdgpu_gem_op_ioctl()
835 r = -EFAULT; in amdgpu_gem_op_ioctl()
839 if (robj->tbo.base.import_attach && in amdgpu_gem_op_ioctl()
840 args->value & AMDGPU_GEM_DOMAIN_VRAM) { in amdgpu_gem_op_ioctl()
841 r = -EINVAL; in amdgpu_gem_op_ioctl()
845 if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { in amdgpu_gem_op_ioctl()
846 r = -EPERM; in amdgpu_gem_op_ioctl()
850 for (base = robj->vm_bo; base; base = base->next) in amdgpu_gem_op_ioctl()
851 if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), in amdgpu_gem_op_ioctl()
852 amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { in amdgpu_gem_op_ioctl()
853 r = -EINVAL; in amdgpu_gem_op_ioctl()
859 robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | in amdgpu_gem_op_ioctl()
862 robj->allowed_domains = robj->preferred_domains; in amdgpu_gem_op_ioctl()
863 if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) in amdgpu_gem_op_ioctl()
864 robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; in amdgpu_gem_op_ioctl()
866 if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) in amdgpu_gem_op_ioctl()
873 r = -EINVAL; in amdgpu_gem_op_ioctl()
912 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; in amdgpu_mode_dumb_create()
926 if (adev->mman.buffer_funcs_enabled) in amdgpu_mode_dumb_create()
929 args->pitch = amdgpu_gem_align_pitch(adev, args->width, in amdgpu_mode_dumb_create()
930 DIV_ROUND_UP(args->bpp, 8), 0); in amdgpu_mode_dumb_create()
931 args->size = (u64)args->pitch * args->height; in amdgpu_mode_dumb_create()
932 args->size = ALIGN(args->size, PAGE_SIZE); in amdgpu_mode_dumb_create()
935 r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, in amdgpu_mode_dumb_create()
936 ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); in amdgpu_mode_dumb_create()
938 return -ENOMEM; in amdgpu_mode_dumb_create()
941 /* drop reference from allocate - handle holds it now */ in amdgpu_mode_dumb_create()
946 args->handle = handle; in amdgpu_mode_dumb_create()
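
The pitch/size math in the dumb-create matches above: pitch is width times bytes per pixel (DIV_ROUND_UP(bpp, 8)) rounded up to the device's pitch alignment, and size is pitch times height rounded up to a whole page. A 1920x1080, 32 bpp dumb buffer, for instance, yields pitch = align(1920 * 4) bytes and size = ALIGN(pitch * 1080, PAGE_SIZE).
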
953 struct amdgpu_device *adev = m->private; in amdgpu_debugfs_gem_info_show()
958 r = mutex_lock_interruptible(&dev->filelist_mutex); in amdgpu_debugfs_gem_info_show()
962 list_for_each_entry(file, &dev->filelist, lhead) { in amdgpu_debugfs_gem_info_show()
969 * Although we have a valid reference on file->pid, that does in amdgpu_debugfs_gem_info_show()
972 * Therefore, we need to protect this ->comm access using RCU. in amdgpu_debugfs_gem_info_show()
975 pid = rcu_dereference(file->pid); in amdgpu_debugfs_gem_info_show()
978 task ? task->comm : "<unknown>"); in amdgpu_debugfs_gem_info_show()
981 spin_lock(&file->table_lock); in amdgpu_debugfs_gem_info_show()
982 idr_for_each_entry(&file->object_idr, gobj, id) { in amdgpu_debugfs_gem_info_show()
987 spin_unlock(&file->table_lock); in amdgpu_debugfs_gem_info_show()
990 mutex_unlock(&dev->filelist_mutex); in amdgpu_debugfs_gem_info_show()
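
The RCU matches in this function guard the ->comm read: the pid pointer is fetched under rcu_read_lock() so the task_struct cannot be freed while it is dereferenced. A sketch of the guarded block, with the lines between the matches assumed:

	rcu_read_lock();
	pid = rcu_dereference(file->pid);
	task = pid_task(pid, PIDTYPE_TGID);	/* may be NULL if the task exited */
	seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
		   task ? task->comm : "<unknown>");
	rcu_read_unlock();
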
1001 struct drm_minor *minor = adev_to_drm(adev)->primary; in amdgpu_debugfs_gem_init()
1002 struct dentry *root = minor->debugfs_root; in amdgpu_debugfs_gem_init()