/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_vmm.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"
#include "nouveau_mem.h"

void
nouveau_vma_unmap(struct nouveau_vma *vma)
{
	if (vma->mem) {
		nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
		vma->mem = NULL;
	}
}

int
nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
{
	struct nvif_vma tmp = { .addr = vma->addr };
	int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
	if (ret)
		return ret;
	vma->mem = mem;
	return 0;
}

struct nouveau_vma *
nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
{
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vmm == vmm)
			return vma;
	}

	return NULL;
}

void
nouveau_vma_del(struct nouveau_vma **pvma)
{
	struct nouveau_vma *vma = *pvma;
	if (vma && --vma->refs <= 0) {
		/* Release the address-space allocation, if one was made. */
		if (likely(vma->addr != ~0ULL)) {
			struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
			nvif_vmm_put(&vma->vmm->vmm, &tmp);
		}
		list_del(&vma->head);
		kfree(*pvma);
	}
	*pvma = NULL;
}

int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
		struct nouveau_vma **pvma)
{
	struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
	struct nouveau_vma *vma;
	struct nvif_vma tmp;
	int ret;

	/* Reuse an existing VMA for this BO in the same VMM, if any. */
	if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
		vma->refs++;
		return 0;
	}

	if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
		return -ENOMEM;
	vma->vmm = vmm;
	vma->refs = 1;
	vma->addr = ~0ULL;
	vma->mem = NULL;
	vma->fence = NULL;
	list_add_tail(&vma->head, &nvbo->vma_list);

	if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		/* Non-system memory with a compatible page size: allocate
		 * address space and map it immediately.
		 */
		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
				   mem->mem.size, &tmp);
		if (ret)
			goto done;

		vma->addr = tmp.addr;
		ret = nouveau_vma_map(vma, mem);
	} else {
		/* Otherwise only reserve address space here; the mapping
		 * is performed later via nouveau_vma_map().
		 */
		ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
				   mem->mem.size, &tmp);
		if (ret)
			goto done;

		vma->addr = tmp.addr;
	}

done:
	if (ret)
		nouveau_vma_del(pvma);
	return ret;
}

void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
	nouveau_svmm_fini(&vmm->svmm);
	nvif_vmm_dtor(&vmm->vmm);
	vmm->cli = NULL;
}

int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
	int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
				PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
	if (ret)
		return ret;

	vmm->cli = cli;
	return 0;
}