/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
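/* Chipset-independent core of the NVKM FIFO (host) engine: constructs the
 * channel-id allocators, runqueues (PBDMAs) and runlists, services device
 * queries and channel-class allocation, and defers all hardware-specific
 * work to the chipset's nvkm_fifo_func.
 */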
#include "priv.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include <core/gpuobj.h>
#include <subdev/mc.h>

#include <nvif/cl0080.h>
#include <nvif/unpack.h>

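/* Recover a single channel by id.  The hook is optional (hence the
 * WARN_ON()) and runs under fifo->lock with interrupts disabled.
 */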
void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;

	if (WARN_ON(!fifo->func->recover_chan))
		return;

	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

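/* Pause and resume channel processing around critical sections (older
 * chipsets pause host while flushing MMU TLBs, for example).  *flags carries
 * IRQ/lock state from pause() to the matching start() call.
 */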
void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	fifo->func->start(fifo, flags);
}

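/* Forward a decoded MMU fault to the chipset's recovery handler. */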
void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	fifo->func->mmu_fault->recover(fifo, info);
}

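/* Notify listeners on the kill event that channel "chid" has been killed,
 * e.g. as part of fault recovery.
 */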
void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_ntfy(&fifo->kevent, chid, NVKM_FIFO_EVENT_KILLED);
}

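/* No hardware (un)masking is needed for this event: KILLED notifications
 * are raised purely from software via nvkm_fifo_kevent().
 */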
static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
};

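/* Construct one of the classes exposed via nvkm_fifo_class_get().  Only the
 * chipset's channel class is expected here; anything else is a bug in the
 * enumeration below.
 */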
static int
nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		    void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);

	if (oclass->engn == &fifo->func->chan.user)
		return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);

	WARN_ON(1);
	return -ENOSYS;
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};

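/* Enumerate the device classes implemented by this engine.  A hit on
 * "index" fills in oclass/*class and returns 0; an out-of-range index
 * instead returns the number of classes seen, ending enumeration.  Only the
 * chipset's channel class (DMA or GPFIFO) is exposed here.
 */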
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
	int c = 0;

	/* *_CHANNEL_DMA, *_CHANNEL_GPFIFO_* */
	if (chan->user.oclass) {
		if (c++ == index) {
			oclass->base = chan->user;
			oclass->engn = &fifo->func->chan.user;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

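/* Engine suspend/fini: block the interrupt handler before running the
 * optional chipset hook, so nothing races with teardown.
 */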
static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);

	nvkm_inth_block(&fifo->engine.subdev.inth);

	if (fifo->func->fini)
		fifo->func->fini(fifo);

	return 0;
}

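/* Engine (re)init.  Chipsets with an init_pbdmas() hook are first handed a
 * bitmask of the runqueues to bring up; with runqs 0 and 1 present, for
 * example, mask ends up BIT(0) | BIT(1) == 0x3.  The interrupt handler is
 * only unblocked once chipset init has completed.
 */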
static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runq *runq;
	u32 mask = 0;

	if (fifo->func->init_pbdmas) {
		nvkm_runq_foreach(runq, fifo)
			mask |= BIT(runq->id);

		fifo->func->init_pbdmas(fifo, mask);

		nvkm_runq_foreach(runq, fifo)
			runq->func->init(runq);
	}

	fifo->func->init(fifo);

	nvkm_inth_allow(&fifo->engine.subdev.inth);
	return 0;
}

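/* Service NV_DEVICE_V0 host queries (see nvif/cl0080.h).  *data is both
 * input and output: NV_DEVICE_HOST_RUNLIST_ENGINES, for instance, takes a
 * runlist id in *data and replaces it with a bitmask of the engines served
 * by that runlist.  NV_DEVICE_HOST_RUNLIST_CHANNELS is only valid on GPUs
 * where channel ids are allocated per-runlist rather than globally
 * (!fifo->chid).
 */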
static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	int ret;

	ret = nvkm_subdev_oneinit(&fifo->engine.subdev);
	if (ret)
		return ret;

	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
	case NV_DEVICE_HOST_RUNLISTS:
		*data = 0;
		nvkm_runl_foreach(runl, fifo)
			*data |= BIT(runl->id);
		return 0;
	case NV_DEVICE_HOST_RUNLIST_ENGINES:
		runl = nvkm_runl_get(fifo, *data, 0);
		if (runl) {
			*data = 0;
			nvkm_runl_foreach_engn(engn, runl) {
#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
				switch (engn->engine->subdev.type) {
				case NVKM_ENGINE_DMAOBJ:
					break;
				CASE(SW    );
				CASE(GR    );
				CASE(MPEG  );
				CASE(ME    );
				CASE(CIPHER);
				CASE(BSP   );
				CASE(VP    );
				CASE(CE    );
				CASE(SEC   );
				CASE(MSVLD );
				CASE(MSPDEC);
				CASE(MSPPP );
				CASE(MSENC );
				CASE(VIC   );
				CASE(SEC2  );
				CASE(NVDEC );
				CASE(NVENC );
				default:
					WARN_ON(1);
					break;
				}
#undef CASE
			}
			return 0;
		}
		return -EINVAL;
	case NV_DEVICE_HOST_RUNLIST_CHANNELS:
		if (!fifo->chid) {
			runl = nvkm_runl_get(fifo, *data, 0);
			if (runl) {
				*data = runl->chid->nr;
				return 0;
			}
		}
		return -EINVAL;
	default:
		break;
	}

	return -ENOSYS;
}

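/* One-time setup, in dependency order: channel/group-id allocators first,
 * then the runqueues (PBDMAs), then the runlists that reference them, and
 * finally the interrupt handler and optional chipset hook.
 */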
static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_subdev *subdev = &engine->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	int ret, nr, i;

	/* Initialise CHID/CGID allocator(s) on GPUs where they aren't per-runlist. */
	if (fifo->func->chid_nr) {
		ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
		if (ret)
			return ret;
	}

	/* Create runqueues for each PBDMA. */
	if (fifo->func->runq_nr) {
		for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
			if (!nvkm_runq_new(fifo, i))
				return -ENOMEM;
		}
	}

	/* Create runlists. */
	ret = fifo->func->runl_ctor(fifo);
	if (ret)
		return ret;

	nvkm_runl_foreach(runl, fifo) {
		RUNL_DEBUG(runl, "");
		nvkm_runl_foreach_engn(engn, runl) {
			ENGN_DEBUG(engn, "");
		}
	}

	/* Register interrupt handler. */
	if (fifo->func->intr) {
		ret = nvkm_inth_add(&device->mc->intr, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
				    subdev, fifo->func->intr, &subdev->inth);
		if (ret) {
			nvkm_error(subdev, "intr %d\n", ret);
			return ret;
		}
	}

	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);

	return 0;
}

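/* Reset the engine via PMC before first use, so init starts from a known
 * state.
 */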
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}

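/* Destruction mirrors nvkm_fifo_oneinit()/nvkm_fifo_ctor(): runlists and
 * runqueues go first, then the id allocators and chipset data, and finally
 * the events and mutex created by the constructor.
 */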
static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl, *runt;
	struct nvkm_runq *runq, *rtmp;
	void *data = fifo;

	list_for_each_entry_safe(runl, runt, &fifo->runls, head)
		nvkm_runl_del(runl);
	list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
		nvkm_runq_del(runq);

	nvkm_chid_unref(&fifo->cgid);
	nvkm_chid_unref(&fifo->chid);

	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->nonstall.event);
	mutex_destroy(&fifo->mutex);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.info = nvkm_fifo_info,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.base.sclass = nvkm_fifo_class_get,
};

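/* Common constructor, called by chipset code with a pre-allocated
 * nvkm_fifo.  A minimal hypothetical caller (illustrative only, not from
 * this file) might look like:
 *
 *	struct nvkm_fifo *fifo;
 *
 *	if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
 *		return -ENOMEM;
 *	return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, fifo);
 *
 * Note that func->chid_nr must be implemented: it is called unconditionally
 * below to size the KILLED-notification event.
 */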
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_fifo *fifo)
{
	int ret, nr;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->runqs);
	INIT_LIST_HEAD(&fifo->runls);
	spin_lock_init(&fifo->lock);
	mutex_init(&fifo->mutex);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&fifo->chan);

	nr = func->chid_nr(fifo);
	fifo->nr = nr;

	if (func->nonstall) {
		ret = nvkm_event_init(func->nonstall, &fifo->engine.subdev, 1, 1,
				      &fifo->nonstall.event);
		if (ret)
			return ret;
	}

	return nvkm_event_init(&nvkm_fifo_kevent_func, &fifo->engine.subdev, 1, nr, &fifo->kevent);
}
343