/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;
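/* Resolve a client object handle to the VMM it wraps, for callers that
 * reference a VMM by handle.
 */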
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, &nvkm_uvmm);
	if (IS_ERR(object))
		return (void *)object;

	return nvkm_uvmm(object)->vmm;
}

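/* NVIF_VMM_V0_UNMAP: unmap memory from a virtual address range. */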
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_unmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx",
			  addr, vma ? vma->addr : ~0ULL);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	if (ret = -EINVAL, !vma->memory) {
		VMM_DEBUG(vmm, "unmapped");
		goto done;
	}

	nvkm_vmm_unmap_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}

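/* NVIF_VMM_V0_MAP: map memory into an allocated virtual address range. */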
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_map_v0 v0;
	} *args = argv;
	u64 addr, size, handle, offset;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	struct nvkm_memory *memory;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		addr = args->v0.addr;
		size = args->v0.size;
		handle = args->v0.memory;
		offset = args->v0.offset;
	} else
		return ret;

	memory = nvkm_umem_search(client, handle);
	if (IS_ERR(memory)) {
		VMM_DEBUG(vmm, "memory %016llx %ld", handle, PTR_ERR(memory));
		return PTR_ERR(memory);
	}

	mutex_lock(&vmm->mutex);
	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
		VMM_DEBUG(vmm, "lookup %016llx", addr);
		goto fail;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto fail;
	}

	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
		if (addr + size > vma->addr + vma->size || vma->memory ||
		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
			VMM_DEBUG(vmm, "split %d %d %d "
				       "%016llx %016llx %016llx %016llx",
				  !!vma->memory, vma->refd, vma->mapref,
				  addr, size, vma->addr, (u64)vma->size);
			goto fail;
		}

		vma = nvkm_vmm_node_split(vmm, vma, addr, size);
		if (!vma) {
			ret = -ENOMEM;
			goto fail;
		}
	}
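	/* Mark the VMA busy so other methods can't touch it while the
	 * mutex is dropped around the backend map call below.
	 */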
	vma->busy = true;
	mutex_unlock(&vmm->mutex);

	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
	if (ret == 0) {
		/* Successful map will clear vma->busy. */
		nvkm_memory_unref(&memory);
		return 0;
	}

	mutex_lock(&vmm->mutex);
	vma->busy = false;
	nvkm_vmm_unmap_region(vmm, vma);
fail:
	mutex_unlock(&vmm->mutex);
	nvkm_memory_unref(&memory);
	return ret;
}

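/* NVIF_VMM_V0_PUT: release a virtual address range back to the VMM. */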
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_put_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
			  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
		goto done;
	}

	if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
			  vma->user, !client->super, vma->busy);
		goto done;
	}

	nvkm_vmm_put_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex);
	return ret;
}

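/* NVIF_VMM_V0_GET: allocate a virtual address range. */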
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_get_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	bool getref, mapref, sparse;
	u8 page, align;
	u64 size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
		mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
		sparse = args->v0.sparse;
		page = args->v0.page;
		align = args->v0.align;
		size = args->v0.size;
	} else
		return ret;

	mutex_lock(&vmm->mutex);
	ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
				  page, align, size, &vma);
	mutex_unlock(&vmm->mutex);
	if (ret)
		return ret;

	args->v0.addr = vma->addr;
	vma->user = !client->super;
	return ret;
}

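/* NVIF_VMM_V0_PAGE: query the properties of a supported page size. */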
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_page_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	int ret = -ENOSYS;
	u8 type, index, nr;

	page = uvmm->vmm->func->page;
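	/* The page array is terminated by an entry with a zero shift. */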
	for (nr = 0; page[nr].shift; nr++);

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= nr)
			return -EINVAL;
		type = page[index].type;
		args->v0.shift = page[index].shift;
		args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
		args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
		args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
		args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
	} else
		return -ENOSYS;

	return 0;
}

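/* Dispatch NVIF VMM methods to the handlers above. */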
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	switch (mthd) {
	case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
	case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
	case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
	case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	nvkm_vmm_unref(&uvmm->vmm);
	return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
	.dtor = nvkm_uvmm_dtor,
	.mthd = nvkm_uvmm_mthd,
};

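/* Create a userspace view of a VMM, either wrapping the MMU's shared VMM
 * or constructing a new one for the client.
 */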
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
	const bool more = oclass->base.maxver >= 0;
	union {
		struct nvif_vmm_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	struct nvkm_uvmm *uvmm;
	int ret = -ENOSYS;
	u64 addr, size;

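	/* A versioned class may append backend-specific data after the
	 * v0 args, hence "more" is passed through to nvif_unpack().
	 */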
	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
	*pobject = &uvmm->object;

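	/* If the MMU has no shared VMM, construct a new one for this
	 * client; otherwise, take a reference on the shared instance.
	 */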
	if (!mmu->vmm) {
		ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
					  NULL, "user", &uvmm->vmm);
		if (ret)
			return ret;

		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
	} else {
		if (size)
			return -EINVAL;

		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
	}

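	/* Report the VMM's address range and supported page size count
	 * back to the client.
	 */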
	page = uvmm->vmm->func->page;
	args->v0.page_nr = 0;
	while (page && (page++)->shift)
		args->v0.page_nr++;
	args->v0.addr = uvmm->vmm->start;
	args->v0.size = uvmm->vmm->limit;
	return 0;
}