/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

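/*
 * Each fifo_engine[] entry describes one runlist: the subdev that owns
 * it, plus a mask of every engine whose channels are scheduled from it
 * (GR shares its runlist with SW and the CE2 copy engine).
 */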
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

struct gk104_fifo_engn {
	struct nvkm_memory *runlist[2];
	int cur_runlist;
	wait_queue_head_t wait;
};

struct gk104_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_memory *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

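/*
 * Rebuild and submit the runlist for the given engine.  Runlists are
 * double-buffered so the new list can be built while the hardware may
 * still be reading the old one.  Each entry is 8 bytes: the channel id,
 * then zero (a channel-type entry).  0x002270 takes the runlist base
 * (in units of 4KiB), and writing 0x002274 (runlist id in bits 20+,
 * entry count in the low bits) kicks off processing; bit 20 of
 * 0x002284+(engine*0x08) appears to stay set until the update lands.
 */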
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i <= fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
				      NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	nvkm_done(engn);
	return 0;
}

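/*
 * Force the channel off its PBDMA: writing the channel id to 0x002634
 * requests a preempt, and bit 20 presumably clears once the channel's
 * state has been saved out.
 */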
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nvkm_kmap(engn);
		nvkm_wo32(engn, addr + 0x00, 0x00000000);
		nvkm_wo32(engn, addr + 0x04, 0x00000000);
		nvkm_done(engn);
	}

	return 0;
}

static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	u32 engines;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
		if (!nvkm_engine(parent, fifo_engine[i].subdev))
			continue;
		engines |= (1 << i);
	}

	if (!args->v0.engine) {
		static struct nvkm_oclass oclass = {
			.ofuncs = &nvkm_object_ofuncs,
		};
		args->v0.engine = engines;
		return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
	}

	engines &= args->v0.engine;
	if (!engines) {
		nvif_ioctl(parent, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}
	i = __ffs(engines);

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200, 0,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->base.inst = base->base.gpuobj.addr;
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
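	/* ring size is encoded as log2() of the GPFIFO entry count (8 bytes each) */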
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

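	/* RAMFC setup: 0x08/0x0c point at this channel's USERD page,
	 * 0x48/0x4c hold the GPFIFO base and encoded ring size; the
	 * remaining writes are fixed per-channel initialisation state.
	 */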
	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xe8, chan->base.chid);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(ramfc);
	return 0;
}

static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

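	/* STOPPED->RUNNING; the assignment inside the condition is
	 * intentional.  The enable bit is set again after the runlist
	 * update, presumably in case processing the runlist cleared it.
	 */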
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}

static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

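	/* RUNNING->STOPPED; again, the assignment inside the condition
	 * is intentional
	 */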
	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_engine(engine)->subdev.device;
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_del(&base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
}

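/*
 * Process-context half of engine recovery: atomically take the mask of
 * faulting engines, block their runlists via 0x002630, reset each
 * engine by cycling its subdev, rebuild the affected runlists, then
 * write 0x00262c (presumably an ack) and unblock scheduling.
 */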
static void
gk104_fifo_recover_work(struct work_struct *work)
{
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
	}

	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		   struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[nv_subdev(engine)->index], chid);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

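/*
 * Attempt to execute a software method: look up the SW-class object
 * bound to the channel and forward the method/data to it.  Returns
 * -EINVAL if no SW object is bound or the call is rejected.
 */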
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

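/*
 * MMU fault decoding.  The per-unit fault status register packs:
 *   bits 24-28: GPC id (GPC-client faults only)
 *   bits  8-12: client id
 *   bit      7: write (vs read)
 *   bit      6: hub client (vs GPC client)
 *   bits   0-3: fault reason
 */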
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

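/*
 * PBDMA interrupt, first status register.  DEVICE (bit 23) traps
 * software methods: if the SW class handles one, the bit is dropped
 * from the error report, and 0x0400c0 is rewritten (0x80600008), which
 * appears to pop/ack the trapped method.
 */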
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

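/*
 * Top-level PFIFO interrupt dispatch.  Each recognised status bit is
 * handled and then acked in 0x002100; anything left over is reported
 * once and masked off in 0x002140 to avoid an interrupt storm.
 */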
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

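/* bit 31 of 0x002140 gates the non-stall interrupt used for user events */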
static void
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_fini(&fifo->base, suspend);
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
	return 0;
}

int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

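	/* point the hardware at the BAR1 mapping of USERD (bit 28 looks
	 * like an enable bit)
	 */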
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}

void
gk104_fifo_dtor(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		nvkm_memory_del(&fifo->engine[i].runlist[1]);
		nvkm_memory_del(&fifo->engine[i].runlist[0]);
	}

	nvkm_fifo_destroy(&fifo->base);
}

int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_bar *bar = device->bar;
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      impl->channels * 0x200, 0x1000,
			      true, &fifo->user.mem);
	if (ret)
		return ret;

	ret = bar->umap(bar, impl->channels * 0x200, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}

struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;