/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;

/* Resolve a client-supplied object handle to the VMM it wraps. */
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, &nvkm_uvmm);
	if (IS_ERR(object))
		return (void *)object;

	return nvkm_uvmm(object)->vmm;
}

/*
 * NVIF_VMM_V0_UNMAP: remove the mapping backing the VMA at 'addr'.  The
 * address-space allocation itself remains until NVIF_VMM_V0_PUT.
 *
 * Note: the "ret = -ERRNO, condition" tests below use the comma operator
 * to select the failure code for the check that follows.
 */
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_unmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx",
			  addr, vma ? vma->addr : ~0ULL);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	if (ret = -EINVAL, !vma->memory) {
		VMM_DEBUG(vmm, "unmapped");
		goto done;
	}

	nvkm_vmm_unmap_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}
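/*
 * NVIF_VMM_V0_MAP: map part of an nvkm_memory object into a region of
 * address space previously allocated with NVIF_VMM_V0_GET.  Backend
 * (page-table) arguments may follow the v0 header, which is why
 * nvif_unpack() is invoked with 'more' set below.
 *
 * A minimal client-side sketch, assuming the in-tree NVIF ioctl path;
 * the surrounding object setup and variable names are illustrative only:
 *
 *	struct nvif_vmm_map_v0 args = {
 *		.addr = vma_addr,	// from a prior GET
 *		.size = map_size,
 *		.memory = nvif_handle(&mem->object),
 *		.offset = 0,
 *	};
 *	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
 *			       &args, sizeof(args));
 */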
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_map_v0 v0;
	} *args = argv;
	u64 addr, size, handle, offset;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	struct nvkm_memory *memory;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		addr = args->v0.addr;
		size = args->v0.size;
		handle = args->v0.memory;
		offset = args->v0.offset;
	} else
		return ret;

	memory = nvkm_umem_search(client, handle);
	if (IS_ERR(memory)) {
		VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
		return PTR_ERR(memory);
	}

	mutex_lock(&vmm->mutex);
	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
		VMM_DEBUG(vmm, "lookup %016llx", addr);
		goto fail;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto fail;
	}

	/* If the requested range doesn't exactly cover an existing VMA, try
	 * to split one: refused when the range crosses the VMA's bounds,
	 * the VMA is already mapped, or its page tables aren't reference-
	 * counted in a way that permits a partial map.
	 */
	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
		if (addr + size > vma->addr + vma->size || vma->memory ||
		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
			VMM_DEBUG(vmm, "split %d %d %d "
				       "%016llx %016llx %016llx %016llx",
				  !!vma->memory, vma->refd, vma->mapref,
				  addr, size, vma->addr, (u64)vma->size);
			goto fail;
		}

		/* Split off the head so the target node starts at addr. */
		if (vma->addr != addr) {
			const u64 tail = vma->size + vma->addr - addr;
			if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
				goto fail;
			vma->part = true;
			nvkm_vmm_node_insert(vmm, vma);
		}

		/* Split off the tail so the target node is exactly size. */
		if (vma->size != size) {
			const u64 tail = vma->size - size;
			struct nvkm_vma *tmp;
			if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
				nvkm_vmm_unmap_region(vmm, vma);
				goto fail;
			}
			tmp->part = true;
			nvkm_vmm_node_insert(vmm, tmp);
		}
	}

	/* Keep other methods away from the VMA while the map itself is
	 * performed outside of the mutex.
	 */
	vma->busy = true;
	mutex_unlock(&vmm->mutex);

	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
	if (ret == 0) {
		/* Successful map will clear vma->busy. */
		nvkm_memory_unref(&memory);
		return 0;
	}

	mutex_lock(&vmm->mutex);
	vma->busy = false;
	nvkm_vmm_unmap_region(vmm, vma);
fail:
	mutex_unlock(&vmm->mutex);
	nvkm_memory_unref(&memory);
	return ret;
}

/*
 * NVIF_VMM_V0_PUT: release address space allocated with NVIF_VMM_V0_GET,
 * tearing down any remaining mapping in the process.
 */
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_put_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
			  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	nvkm_vmm_put_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}
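/*
 * NVIF_VMM_V0_GET: allocate a region of address space.  type selects
 * whether page tables are pre-referenced for the region (PTES) or only
 * the address range is reserved (ADDR).
 *
 * A minimal client-side sketch (illustrative; size/vma_addr are
 * assumptions, the structure fields come from <nvif/if000c.h>):
 *
 *	struct nvif_vmm_get_v0 args = {
 *		.type = NVIF_VMM_GET_V0_ADDR,
 *		.page = 0,	// page-size index, see the PAGE method
 *		.size = size,
 *	};
 *	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
 *			       &args, sizeof(args));
 *	if (ret == 0)
 *		vma_addr = args.addr;	// allocated address
 */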
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_get_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	bool getref, mapref, sparse;
	u8 page, align;
	u64 size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
		mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
		sparse = args->v0.sparse;
		page = args->v0.page;
		align = args->v0.align;
		size = args->v0.size;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
				  page, align, size, &vma);
	mutex_unlock(&vmm->mutex);
	if (ret)
		return ret;

	args->v0.addr = vma->addr;
	vma->user = !client->super;
	return ret;
}

/* NVIF_VMM_V0_PAGE: describe the page size/capabilities at a given index. */
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_page_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	int ret = -ENOSYS;
	u8 type, index, nr;

	/* Count the page-size entries supported by this backend. */
	page = uvmm->vmm->func->page;
	for (nr = 0; page[nr].shift; nr++);

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= nr)
			return -EINVAL;
		type = page[index].type;
		args->v0.shift = page[index].shift;
		args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
		args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
		args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
		args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
	} else
		return -ENOSYS;

	return 0;
}

static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	switch (mthd) {
	case NVIF_VMM_V0_PAGE : return nvkm_uvmm_mthd_page (uvmm, argv, argc);
	case NVIF_VMM_V0_GET  : return nvkm_uvmm_mthd_get  (uvmm, argv, argc);
	case NVIF_VMM_V0_PUT  : return nvkm_uvmm_mthd_put  (uvmm, argv, argc);
	case NVIF_VMM_V0_MAP  : return nvkm_uvmm_mthd_map  (uvmm, argv, argc);
	case NVIF_VMM_V0_UNMAP: return nvkm_uvmm_mthd_unmap(uvmm, argv, argc);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	nvkm_vmm_unref(&uvmm->vmm);
	return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
	.dtor = nvkm_uvmm_dtor,
	.mthd = nvkm_uvmm_mthd,
};
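/*
 * Constructor for the userspace-visible VMM object.  If the MMU has no
 * shared kernel VMM, a new per-client VMM spanning [addr, addr + size)
 * is created; otherwise the existing VMM is shared, and the client must
 * pass size == 0.
 *
 * Client-side creation is a normal object constructor call.  As a rough
 * sketch (the helper name varies by kernel version, nvif_object_init()
 * in older trees and nvif_object_ctor() in newer ones, and the oclass
 * value depends on the GPU generation):
 *
 *	struct nvif_vmm_v0 args = { .addr = 0, .size = 0 };
 *	ret = nvif_object_ctor(&mmu->object, "uvmm", 0, oclass,
 *			       &args, sizeof(args), &vmm->object);
 */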
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
	const bool more = oclass->base.maxver >= 0;
	union {
		struct nvif_vmm_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	struct nvkm_uvmm *uvmm;
	int ret = -ENOSYS;
	u64 addr, size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
	*pobject = &uvmm->object;

	if (!mmu->vmm) {
		/* No kernel-managed VMM on this MMU: create a VMM private
		 * to this client.
		 */
		ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
					  NULL, "user", &uvmm->vmm);
		if (ret)
			return ret;

		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
	} else {
		/* Share the kernel-managed VMM; the client cannot request
		 * its own size in this case.
		 */
		if (size)
			return -EINVAL;

		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
	}

	/* Report back the number of page-size entries queryable via the
	 * PAGE method, and the VMM's usable address range.
	 */
	page = uvmm->vmm->func->page;
	args->v0.page_nr = 0;
	while (page && (page++)->shift)
		args->v0.page_nr++;
	args->v0.addr = uvmm->vmm->start;
	args->v0.size = uvmm->vmm->limit;
	return 0;
}
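/*
 * Usage note: the page_nr value written back above tells the client how
 * many page-size entries may be queried via NVIF_VMM_V0_PAGE.  A sketch
 * of that query loop from the client side (illustrative only):
 *
 *	for (i = 0; i < args.page_nr; i++) {
 *		struct nvif_vmm_page_v0 page = { .index = i };
 *		ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
 *				       &page, sizeof(page));
 *		if (ret)
 *			break;
 *		// page.shift, .sparse, .vram, .host, .comp are now valid
 *	}
 */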