xref: /openbmc/linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c (revision 7ac293328122075a2afc40a4089e7afc6cbc26eb)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include "nv04.h"
#include "channv04.h"
#include "regsnv04.h"

#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/sw.h>

#include <nvif/class.h>

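/* Halt a channel.  Context switching is disabled while this runs; if the
 * channel is the one currently loaded into CACHE1, its PFIFO state is saved
 * back into its RAMFC entry (using the per-chipset ramfc layout table), the
 * registers are cleared, and the "invalid channel" id is loaded in its place.
 * Finally, the channel's DMA-mode bit is cleared in NV04_PFIFO_MODE.
 */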
void
nv04_chan_stop(struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	const struct nvkm_ramfc_layout *c;
	unsigned long flags;
	u32 data = chan->ramfc_offset;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
	if (chid == chan->id) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = chan->func->ramfc->layout;
		nvkm_kmap(fctx);
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);
		nvkm_done(fctx);

		c = chan->func->ramfc->layout;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

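/* Enable pushbuffer (DMA) mode for the channel by setting its bit in
 * NV04_PFIFO_MODE; cleared again by nv04_chan_stop().
 */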
void
nv04_chan_start(struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	nvkm_mask(fifo->engine.subdev.device, NV04_PFIFO_MODE, BIT(chan->id), BIT(chan->id));
	spin_unlock_irqrestore(&fifo->lock, flags);
}

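/* Zero every RAMFC dword described by the layout table for this channel. */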
void
nv04_chan_ramfc_clear(struct nvkm_chan *chan)
{
	struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
	const struct nvkm_ramfc_layout *c = chan->func->ramfc->layout;

	nvkm_kmap(ramfc);
	do {
		nvkm_wo32(ramfc, chan->ramfc_offset + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(ramfc);
}

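/* Fill in the channel's RAMFC entry: DMA_PUT/GET both start at the requested
 * pushbuffer offset, DMA_INSTANCE points at the pushbuffer ctxdma, and
 * DMA_FETCH gets the default fetch trigger/size/request settings.
 */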
static int
nv04_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
	struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
	const u32 base = chan->id * 32;

	chan->ramfc_offset = base;

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, base + 0x00, offset);
	nvkm_wo32(ramfc, base + 0x04, offset);
	nvkm_wo32(ramfc, base + 0x08, chan->push->addr >> 4);
	nvkm_wo32(ramfc, base + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
				      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
				      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
				      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(ramfc);
	return 0;
}

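/* RAMFC layout for NV04: each entry maps a bitfield of a PFIFO CACHE1
 * register (regp/regs) to its location within the channel's RAMFC context
 * image (ctxp/ctxs); the list is terminated by an entry with zero bits.
 * nv04_chan_stop() uses it to save register state into RAMFC, and
 * nv04_chan_ramfc_clear() uses it to wipe that state.
 */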
static const struct nvkm_chan_func_ramfc
nv04_chan_ramfc = {
	.layout = (const struct nvkm_ramfc_layout[]) {
		{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
		{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
		{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
		{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
		{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
		{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
		{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
		{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
		{}
	},
	.write = nv04_chan_ramfc_write,
	.clear = nv04_chan_ramfc_clear,
	.ctxdma = true,
};

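/* USERD (the user-visible channel control area used for work submission) is
 * exposed through BAR 0 starting at 0x800000, one 0x10000-byte slot per
 * channel.
 */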
const struct nvkm_chan_func_userd
nv04_chan_userd = {
	.bar = 0,
	.base = 0x800000,
	.size = 0x010000,
};

const struct nvkm_chan_func_inst
nv04_chan_inst = {
	.size = 0x1000,
};

static const struct nvkm_chan_func
nv04_chan = {
	.inst = &nv04_chan_inst,
	.userd = &nv04_chan_userd,
	.ramfc = &nv04_chan_ramfc,
	.start = nv04_chan_start,
	.stop = nv04_chan_stop,
};

const struct nvkm_cgrp_func
nv04_cgrp = {
};

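/* Remove an engine object's hash table entry from RAMHT. */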
void
nv04_eobj_ramht_del(struct nvkm_chan *chan, int hash)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;

	mutex_lock(&fifo->mutex);
	nvkm_ramht_remove(imem->ramht, hash);
	mutex_unlock(&fifo->mutex);
}

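/* Insert a handle->object entry into RAMHT so the puller can resolve the
 * handle.  The context word marks the entry valid (bit 31) and records the
 * channel and engine ids; nvkm_ramht_insert() fills in the object's instance.
 */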
static int
nv04_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
	u32 context = 0x80000000 | chan->id << 24 | engn->id << 16;
	int hash;

	mutex_lock(&fifo->mutex);
	hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context);
	mutex_unlock(&fifo->mutex);
	return hash;
}

const struct nvkm_engn_func
nv04_engn = {
	.ramht_add = nv04_eobj_ramht_add,
	.ramht_del = nv04_eobj_ramht_del,
};

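/* Stop the puller and context switching so PFIFO state can be modified
 * safely.  Returns with fifo->lock held; paired with nv04_fifo_start().
 */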
void
nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags)
__acquires(fifo->lock)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
	/* In some cases the puller may be left in an inconsistent state
	 * if it is stopped while busy translating handles.  Sometimes this
	 * results in a CACHE_ERROR, and sometimes it fails silently, sending
	 * incorrect instance offsets to PGRAPH after it has been started up
	 * again.
	 *
	 * To avoid this, invalidate the most recently calculated instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

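/* Re-enable the puller and context switching, and drop the lock taken by
 * nv04_fifo_pause().
 */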
void
nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags)
__releases(fifo->lock)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->lock, flags);
}

const struct nvkm_runl_func
nv04_runl = {
};

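/* Decode the error code held in the top three bits of
 * NV_PFIFO_CACHE1_DMA_STATE.
 */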
static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

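/* Attempt to execute a method that stalled the puller in software.  Method
 * 0x0000 marks the subchannel's engine as software, 0x0180-0x01fc resolves
 * the handle to an instance first, and the remaining ranges are passed to
 * the SW engine if the subchannel belongs to it.  Returns true if handled.
 */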
static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;
	const int mthd = (addr & 0x00001ffc);
	const u32 mask = 0x0000000f << (subc * 4);
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
		fallthrough;
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
		fallthrough;
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}

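/* CACHE_ERROR: the puller hit a method it couldn't process.  Fetch the
 * method/data from the CACHE1 method RAM, give the software engine a chance
 * to execute it, log the failure otherwise, then ack the interrupt and step
 * CACHE1_GET past the offending entry before restarting the pusher/puller.
 */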
static void
nv04_fifo_intr_cache_error(struct nvkm_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_chan *chan;
	unsigned long flags;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on
	 * G80 chips, but CACHE1 isn't big enough for this much data.  Tests
	 * show that it wraps around to the start at GET=0x800; no clue as
	 * to why.
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, chan ? chan->name : "unknown",
			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
		nvkm_chan_put(&chan, flags);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

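/* DMA_PUSHER: the pusher failed while fetching or decoding the pushbuffer.
 * Log the channel and pusher state, then recover by skipping ahead to PUT
 * (or the next IB entry on NV50) and restarting the pusher.
 */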
static void
nv04_fifo_intr_dma_pusher(struct nvkm_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	struct nvkm_chan *chan;
	unsigned long flags;
	const char *name;

	chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
	name = chan ? chan->name : "unknown";
	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}
	nvkm_chan_put(&chan, flags);

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

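/* Top-level PFIFO interrupt handler, shared with later chipsets (hence the
 * NV50-specific handling).  Context switching is suspended while interrupts
 * are serviced, and any unexpected bits are logged and then masked off.
 */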
irqreturn_t
nv04_fifo_intr(struct nvkm_inth *inth)
{
	struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_intr_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_intr_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
	return IRQ_HANDLED;
}

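/* Program the PFIFO delay/timeslice settings and the RAMHT, RAMRO and RAMFC
 * base addresses, then enable interrupts, the pusher/puller and context
 * switching.
 */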
void
nv04_fifo_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}

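/* These chipsets have no hardware runlists; a single software runlist is
 * created to track the engines PFIFO can switch between.  SW and DMAOBJ
 * share the software engine entry, and MPEG only exists from NV31 onwards.
 */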
int
nv04_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_runl *runl;

	runl = nvkm_runl_new(fifo, 0, 0, 0);
	if (IS_ERR(runl))
		return PTR_ERR(runl);

	nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
	nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
	nvkm_runl_add(runl, 1, fifo->func->engn   , NVKM_ENGINE_GR, 0);
	nvkm_runl_add(runl, 2, fifo->func->engn   , NVKM_ENGINE_MPEG, 0); /* NV31- */
	return 0;
}

int
nv04_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
	/* The last CHID is reserved by HW as a "channel invalid" marker. */
	return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr - 1, &fifo->chid);
}

static int
nv04_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 16;
}

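/* Common constructor for the NV04-family FIFO implementations; the nr and
 * ramfc arguments are currently unused here.
 */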
int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc,
	       struct nvkm_fifo **pfifo)
{
	struct nv04_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
	if (ret)
		return ret;

	return 0;
}

static const struct nvkm_fifo_func
nv04_fifo = {
	.chid_nr = nv04_fifo_chid_nr,
	.chid_ctor = nv04_fifo_chid_ctor,
	.runl_ctor = nv04_fifo_runl_ctor,
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.runl = &nv04_runl,
	.engn = &nv04_engn,
	.engn_sw = &nv04_engn,
	.cgrp = {{                        }, &nv04_cgrp },
	.chan = {{ 0, 0, NV03_CHANNEL_DMA }, &nv04_chan, .oclass = &nv04_fifo_dma_oclass },
};

int
nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nv04_fifo, device, type, inst, 0, NULL, pfifo);
}