/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "ummu.h"
#include "umem.h"
#include "uvmm.h"

#include <core/client.h>

#include <nvif/if0008.h>
#include <nvif/unpack.h>

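/* Enumerate the user-visible child classes exposed by the MMU: the memory
 * class (privileged clients only) and the VMM class.
 */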
static int
nvkm_ummu_sclass(struct nvkm_object *object, int index,
		 struct nvkm_oclass *oclass)
{
	struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;

	if (mmu->func->mem.user.oclass && oclass->client->super) {
		if (index-- == 0) {
			oclass->base = mmu->func->mem.user;
			oclass->ctor = nvkm_umem_new;
			return 0;
		}
	}

	if (mmu->func->vmm.user.oclass) {
		if (index-- == 0) {
			oclass->base = mmu->func->vmm.user;
			oclass->ctor = nvkm_uvmm_new;
			return 0;
		}
	}

	return -EINVAL;
}

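/* NVIF_MMU_V0_HEAP: report the size of the heap selected by the
 * client-supplied index.
 */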
static int
nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
	struct nvkm_mmu *mmu = ummu->mmu;
	union {
		struct nvif_mmu_heap_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	u8 index;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= mmu->heap_nr)
			return -EINVAL;
		args->v0.size = mmu->heap[index].size;
	} else
		return ret;

	return 0;
}

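/* NVIF_MMU_V0_TYPE: describe the memory type selected by the
 * client-supplied index as a set of NVIF capability flags.
 */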
static int
nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
	struct nvkm_mmu *mmu = ummu->mmu;
	union {
		struct nvif_mmu_type_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	u8 type, index;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= mmu->type_nr)
			return -EINVAL;
		type = mmu->type[index].type;
		args->v0.heap = mmu->type[index].heap;
		args->v0.vram = !!(type & NVKM_MEM_VRAM);
		args->v0.host = !!(type & NVKM_MEM_HOST);
		args->v0.comp = !!(type & NVKM_MEM_COMP);
		args->v0.disp = !!(type & NVKM_MEM_DISP);
		args->v0.kind = !!(type & NVKM_MEM_KIND);
		args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
		args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
		args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
	} else
		return ret;

	return 0;
}

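/* NVIF_MMU_V0_KIND: copy the backend's page kind table, if it provides
 * one, into the client-supplied buffer.
 */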
static int
nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
	struct nvkm_mmu *mmu = ummu->mmu;
	union {
		struct nvif_mmu_kind_v0 v0;
	} *args = argv;
	const u8 *kind = NULL;
	int ret = -ENOSYS, count = 0;

	if (mmu->func->kind)
		kind = mmu->func->kind(mmu, &count);

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		if (argc != args->v0.count * sizeof(*args->v0.data))
			return -EINVAL;
		if (args->v0.count > count)
			return -EINVAL;
		memcpy(args->v0.data, kind, args->v0.count);
	} else
		return ret;

	return 0;
}

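/* Dispatch NVIF MMU methods to the handlers above. */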
static int
nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_ummu *ummu = nvkm_ummu(object);
	switch (mthd) {
	case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
	case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
	case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
	default:
		break;
	}
	return -EINVAL;
}

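/* Object functions for the user MMU class. */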
static const struct nvkm_object_func
nvkm_ummu = {
	.mthd = nvkm_ummu_mthd,
	.sclass = nvkm_ummu_sclass,
};

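/* Constructor for the user MMU object: answer the client's query with the
 * DMA address width and the number of heaps, types and kinds, then allocate
 * and initialise the object.
 */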
int
nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
	      void *argv, u32 argc, struct nvkm_object **pobject)
{
	union {
		struct nvif_mmu_v0 v0;
	} *args = argv;
	struct nvkm_mmu *mmu = device->mmu;
	struct nvkm_ummu *ummu;
	int ret = -ENOSYS, kinds = 0;

	if (mmu->func->kind)
		mmu->func->kind(mmu, &kinds);

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		args->v0.dmabits = mmu->dma_bits;
		args->v0.heap_nr = mmu->heap_nr;
		args->v0.type_nr = mmu->type_nr;
		args->v0.kind_nr = kinds;
	} else
		return ret;

	if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
	ummu->mmu = mmu;
	*pobject = &ummu->object;
	return 0;
}