/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

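/* Map each runlist index to its engine subdev, along with the mask of
 * engines (including itself, via the macro below) that channels created
 * on that runlist may touch - e.g. GR channels may also use SW methods
 * and the GRCE copy engine.
 */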
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];
	int cur_runlist;
	wait_queue_head_t wait;
};

struct gk104_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

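/* Rebuild and commit the runlist for @engine.  Each engine has two runlist
 * buffers so a fresh list can be written while the hardware may still be
 * reading the previous one; every RUNNING channel bound to the engine gets
 * an entry, the buffer address is written to 0x002270, the update is
 * committed via 0x002274, and we then wait for the pending bit in
 * 0x002284+ to clear.
 */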
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

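/* Attach an engine context to a channel: for most engines this maps the
 * context into the channel's VM and writes its address into the engine's
 * slot in the channel instance block.  The copy engines have no slot of
 * their own and reuse the instance block address directly.
 */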
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

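/* Force the channel off the hardware (preempt) by writing its chid to
 * 0x002634 and polling for the operation to complete.
 */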
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

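/* Detach an engine context: the channel is kicked off the hardware first,
 * then the engine's instance-block slot is zeroed.  A failed kick only
 * aborts the detach on suspend; otherwise teardown proceeds regardless.
 */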
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nvkm_kmap(engn);
		nvkm_wo32(engn, addr + 0x00, 0x00000000);
		nvkm_wo32(engn, addr + 0x04, 0x00000000);
		bar->flush(bar);
		nvkm_done(engn);
	}

	return 0;
}

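/* Create a GPFIFO channel.  The engine mask requested by userspace is
 * reduced to engines that actually exist on this device, and the lowest
 * set bit selects the runlist the channel is scheduled on.  If no engine
 * was requested, the supported mask is returned via a dummy object.
 */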
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	u32 engines;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
		if (!nvkm_engine(parent, fifo_engine[i].subdev))
			continue;
		engines |= (1 << i);
	}

	if (!args->v0.engine) {
		static struct nvkm_oclass oclass = {
			.ofuncs = &nvkm_object_ofuncs,
		};
		args->v0.engine = engines;
		return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
	}

	engines &= args->v0.engine;
	if (!engines) {
		nvif_ioctl(parent, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}
	i = __ffs(engines);

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200, 0,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);

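	/* initialise RAMFC: point the channel at its USERD page and its
	 * GPFIFO ring (ib offset/length); the remaining magic values are
	 * presumably scheduling/interrupt defaults carried over from the
	 * gf100 layout */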
	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xe8, chan->base.chid);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	nvkm_done(ramfc);
	return 0;
}

static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

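	/* the assignment inside the condition flips the state to RUNNING
	 * exactly when it was previously STOPPED, so the runlist is only
	 * rebuilt on a real state transition */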
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}

static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

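/* Translate an NVDEV engine index to its runlist index; this mirrors the
 * ordering of the fifo_engine[] table above.
 */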
static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
}

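/* Process-context worker for engine recovery: disable scheduling on the
 * faulting engines, reset them via fini/init, and rebuild their runlists
 * before re-enabling scheduling.
 */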
static void
gk104_fifo_recover_work(struct work_struct *work)
{
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
	}

	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

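/* Mark a faulting channel KILLED so the runlist rebuild drops it, then
 * schedule the engine for reset from process context (we may be called
 * from the interrupt handler here).
 */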
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nv_subdev(engine)->name, chid);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

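/* Attempt to handle a PBDMA method as a software method, by calling into
 * the GF100 SW class object bound to the channel, if there is one.
 */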
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

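/* CTXSW_TIMEOUT recovery: decode each engine's status register and recover
 * any channel that is stuck busy mid-context-switch.
 */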
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

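/* Decode and report an MMU fault.  For BAR/IFB units the empty-mask write
 * appears to nudge the stalled unit so it can retry; for real engines the
 * fault is pinned to a channel via its engine context and recovery is
 * triggered.
 */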
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

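/* Handle PBDMA interrupts.  DEVICE (method) faults are first offered to
 * the software-method handler, and only reported if it declines them.
 */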
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

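/* Top-level PFIFO interrupt handler: each bit of 0x002100 is acknowledged
 * and dispatched individually; anything left unhandled is reported and
 * masked off so it cannot storm.
 */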
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_fini(&fifo->base, suspend);
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
	return 0;
}

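/* Engine initialisation: enable every PBDMA unit, reset and unmask their
 * interrupts, point the hardware at the USERD BAR mapping, and enable the
 * top-level interrupts we handle.
 */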
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}

void
gk104_fifo_dtor(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	int i;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
	}

	nvkm_fifo_destroy(&fifo->base);
}

int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
	}

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}

struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;