/* drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include <core/gpuobj.h>
#include <subdev/mc.h>

#include <nvif/cl0080.h>
#include <nvif/unpack.h>

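/* Ask the backend to recover a single channel by ID, serialised against
 * other channel operations by fifo->lock.  WARNs and bails on backends
 * that don't implement ->recover_chan().
 */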
void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	if (WARN_ON(!fifo->func->recover_chan))
		return;
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

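/* Pause and resume channel scheduling.  ->pause() hands backend state
 * (typically saved IRQ flags) back in *flags, which must be passed to
 * the matching ->start().
 */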
void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	fifo->func->start(fifo, flags);
}

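/* Forward MMU fault information to the backend's fault recovery handler. */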
void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	fifo->func->mmu_fault->recover(fifo, info);
}

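/* Release a channel returned by nvkm_fifo_chan_inst()/nvkm_fifo_chan_chid(),
 * dropping fifo->lock with the IRQ flags saved by the lookup.
 */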
void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

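/* Look up a channel by instance address; the caller must hold fifo->lock.
 * A hit is moved to the head of fifo->chan, keeping recently-active
 * channels cheap to find again.
 */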
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
	struct nvkm_fifo_chan *chan;
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->inst->addr == inst) {
			list_move(&chan->head, &fifo->chan);
			return chan;
		}
	}
	return NULL;
}

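/* As above, but takes fifo->lock itself.  On success the lock is left
 * held, with the saved IRQ flags in *rflags; release both via
 * nvkm_fifo_chan_put().
 */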
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
		*rflags = flags;
		return chan;
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

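/* As nvkm_fifo_chan_inst(), but looks the channel up by channel ID. */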
struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->chid == chid) {
			list_move(&chan->head, &fifo->chan);
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

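/* Notify listeners that a channel has been killed. */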
void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_ntfy(&fifo->kevent, chid, NVKM_FIFO_EVENT_KILLED);
}

static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
};

static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_fini(fifo);
}

static void
nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_init(fifo);
}

static const struct nvkm_event_func
nvkm_fifo_uevent_func = {
	.init = nvkm_fifo_uevent_init,
	.fini = nvkm_fifo_uevent_fini,
};

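/* Broadcast the non-stall interrupt event. */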
void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	nvkm_event_ntfy(&fifo->uevent, 0, NVKM_FIFO_EVENT_NON_STALL_INTR);
}

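/* Constructor for the channel classes exposed to userspace; only the
 * backend's channel user class is recognised here.
 */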
static int
nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		    void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);

	if (oclass->engn == &fifo->func->chan.user)
		return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);

	WARN_ON(1);
	return -ENOSYS;
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};

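/* Enumerate the channel classes (*_CHANNEL_DMA, *_CHANNEL_GPFIFO_*)
 * supported by this backend.
 */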
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
	int c = 0;

	/* *_CHANNEL_DMA, *_CHANNEL_GPFIFO_* */
	if (chan->user.oclass) {
		if (c++ == index) {
			oclass->base = chan->user;
			oclass->engn = &fifo->func->chan.user;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

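/* Thin wrappers routing standard engine methods to the backend. */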
static void
nvkm_fifo_intr(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->intr(fifo);
}

static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->fini)
		fifo->func->fini(fifo);
	return 0;
}

static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->init(fifo);
	return 0;
}

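/* Answer NV_DEVICE_* queries: channel count, the mask of valid runlists,
 * and the engines present on a given runlist.
 */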
static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	int ret;

	ret = nvkm_subdev_oneinit(&fifo->engine.subdev);
	if (ret)
		return ret;

	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
	case NV_DEVICE_HOST_RUNLISTS:
		*data = 0;
		nvkm_runl_foreach(runl, fifo)
			*data |= BIT(runl->id);
		return 0;
	case NV_DEVICE_HOST_RUNLIST_ENGINES:
		runl = nvkm_runl_get(fifo, *data, 0);
		if (runl) {
			*data = 0;
			nvkm_runl_foreach_engn(engn, runl) {
#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
				switch (engn->engine->subdev.type) {
				case NVKM_ENGINE_DMAOBJ:
					break;
				CASE(SW    );
				CASE(GR    );
				CASE(MPEG  );
				CASE(ME    );
				CASE(CIPHER);
				CASE(BSP   );
				CASE(VP    );
				CASE(CE    );
				CASE(SEC   );
				CASE(MSVLD );
				CASE(MSPDEC);
				CASE(MSPPP );
				CASE(MSENC );
				CASE(VIC   );
				CASE(SEC2  );
				CASE(NVDEC );
				CASE(NVENC );
				default:
					WARN_ON(1);
					break;
				}
#undef CASE
			}
			return 0;
		}
		return -EINVAL;
	default:
		break;
	}

	return -ENOSYS;
}

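/* One-time setup: channel-ID allocators, per-PBDMA runqueues, runlists. */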
static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	int ret, nr, i;

	/* Initialise CHID/CGID allocator(s) on GPUs where they aren't per-runlist. */
	if (fifo->func->chid_nr) {
		ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
		if (ret)
			return ret;
	}

	/* Create runqueues for each PBDMA. */
	if (fifo->func->runq_nr) {
		for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
			if (!nvkm_runq_new(fifo, i))
				return -ENOMEM;
		}
	}

	/* Create runlists. */
	ret = fifo->func->runl_ctor(fifo);
	if (ret)
		return ret;

	nvkm_runl_foreach(runl, fifo) {
		RUNL_DEBUG(runl, "");
		nvkm_runl_foreach_engn(engn, runl) {
			ENGN_DEBUG(engn, "");
		}
	}

	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);

	return 0;
}

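/* Reset the engine via MC before first use. */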
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}

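/* Tear down in reverse order of creation: runlists, runqueues, CHID/CGID
 * allocators, backend state, then events and locks.
 */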
static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	struct nvkm_runl *runl, *runt;
	struct nvkm_runq *runq, *rtmp;
	void *data = fifo;

	list_for_each_entry_safe(runl, runt, &fifo->runls, head)
		nvkm_runl_del(runl);
	list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
		nvkm_runq_del(runq);

	nvkm_chid_unref(&fifo->cgid);
	nvkm_chid_unref(&fifo->chid);

	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->uevent);
	mutex_destroy(&fifo->mutex);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.info = nvkm_fifo_info,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.intr = nvkm_fifo_intr,
	.base.sclass = nvkm_fifo_class_get,
};

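/* Common constructor used by all FIFO implementations; clamps the channel
 * count to NVKM_FIFO_CHID_NR and sets up the kernel/user event sources.
 */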
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_fifo *fifo)
{
	int ret, nr;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->runqs);
	INIT_LIST_HEAD(&fifo->runls);
	spin_lock_init(&fifo->lock);
	mutex_init(&fifo->mutex);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&fifo->chan);

	nr = func->chid_nr(fifo);
	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
		fifo->nr = NVKM_FIFO_CHID_NR;
	else
		fifo->nr = nr;

	if (func->uevent_init) {
		ret = nvkm_event_init(&nvkm_fifo_uevent_func, &fifo->engine.subdev, 1, 1,
				      &fifo->uevent);
		if (ret)
			return ret;
	}

	return nvkm_event_init(&nvkm_fifo_kevent_func, &fifo->engine.subdev, 1, nr, &fifo->kevent);
}
397