/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

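/*
 * NOTE: many PFIFO registers on GF100 are not publicly documented, so
 * the register-level comments in this file are best-effort annotations
 * rather than authoritative descriptions.
 *
 * The runlist is double-buffered: while one buffer is live in hardware,
 * the other is rebuilt from software channel state and then swapped in.
 * 'mask' accumulates the engines that faulted and still need a reset by
 * the recovery worker ('fault').
 */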
struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_memory *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_memory *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

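/*
 * Rebuild the inactive runlist buffer from every channel currently in
 * the RUNNING state, then point the hardware at it.  0x002270 appears
 * to take the runlist base >> 12 and 0x002274 the entry count; each
 * entry seems to be two 32-bit words (channel ID, then 0x00000004).
 * Completion is signalled via the RUNLIST interrupt, which wakes
 * runlist.wait; the 2s timeout guards against an update never landing.
 */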
static void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

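/*
 * Bind an engine context to a channel: map the context into the
 * channel's VM (first use only), then write the resulting virtual
 * address into the per-engine pointer slot of the channel's instance
 * block.  The slot offsets (0x0210, 0x0230, ...) presumably follow the
 * fixed GF100 instance-block layout.
 */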
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
				      NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	nvkm_done(engn);
	return 0;
}

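/*
 * Unbind an engine context.  Writing the channel ID to 0x002634 seems
 * to request a context unload ("kick"); the register reading back the
 * same ID is treated as completion.  A timeout during suspend is fatal
 * here, since the context may still be resident on the engine.
 */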
static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, 0x00000000);
	nvkm_wo32(engn, addr + 0x04, 0x00000000);
	nvkm_done(engn);
	return 0;
}

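/*
 * Channel creation.  Each channel owns one 0x1000-byte page of USERD
 * in fifo->user.mem (indexed by channel ID), and its RAMFC occupies
 * the start of the channel's instance block.  ioffset/ilength describe
 * the GPFIFO ring; the length is stored as log2(entry count) in the
 * upper bits of the second ring-address word.
 */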
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct gf100_fifo *fifo = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength);
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x1000, 0,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->base.inst = base->base.gpuobj.addr;
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x54, 0x00000002);
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(ramfc);
	return 0;
}

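/*
 * Enable the channel: 0x003000 + chid*8 appears to take the instance
 * address (with 0xc0000000 presumably acting as a valid/enable flag),
 * and 0x003004 the run-state bits.  The STOPPED -> RUNNING transition
 * also inserts the channel into the runlist.
 */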
265 
266 static int
267 gf100_fifo_chan_init(struct nvkm_object *object)
268 {
269 	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
270 	struct gf100_fifo *fifo = (void *)object->engine;
271 	struct gf100_fifo_chan *chan = (void *)object;
272 	struct nvkm_device *device = fifo->base.engine.subdev.device;
273 	u32 chid = chan->base.chid;
274 	int ret;
275 
276 	ret = nvkm_fifo_channel_init(&chan->base);
277 	if (ret)
278 		return ret;
279 
280 	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
281 
282 	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
283 		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
284 		gf100_fifo_runlist_update(fifo);
285 	}
286 
287 	return 0;
288 }
289 
290 static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);
291 
292 static int
293 gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
294 {
295 	struct gf100_fifo *fifo = (void *)object->engine;
296 	struct gf100_fifo_chan *chan = (void *)object;
297 	struct nvkm_device *device = fifo->base.engine.subdev.device;
298 	u32 chid = chan->base.chid;
299 
300 	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
301 		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
302 		gf100_fifo_runlist_update(fifo);
303 	}
304 
305 	gf100_fifo_intr_engine(fifo);
306 
307 	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
308 	return nvkm_fifo_channel_fini(&chan->base, suspend);
309 }
310 
311 static struct nvkm_ofuncs
312 gf100_fifo_ofuncs = {
313 	.ctor = gf100_fifo_chan_ctor,
314 	.dtor = _nvkm_fifo_channel_dtor,
315 	.init = gf100_fifo_chan_init,
316 	.fini = gf100_fifo_chan_fini,
317 	.map  = _nvkm_fifo_channel_map,
318 	.rd32 = _nvkm_fifo_channel_rd32,
319 	.wr32 = _nvkm_fifo_channel_wr32,
320 	.ntfy = _nvkm_fifo_channel_ntfy
321 };
322 
323 static struct nvkm_oclass
324 gf100_fifo_sclass[] = {
325 	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
326 	{}
327 };
328 
329 /*******************************************************************************
330  * FIFO context - instmem heap and vm setup
331  ******************************************************************************/
332 
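/*
 * Per-channel context (instance block) setup: a page directory is
 * allocated and its address written at 0x0200/0x0204, with what looks
 * like the VM limit at 0x0208/0x020c.  The channel then shares the
 * owning client's VM via nvkm_vm_ref().
 */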
static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_engine(engine)->subdev.device;
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_del(&base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

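/*
 * gf100_fifo_engidx() and gf100_fifo_engine() are inverse mappings
 * between NVDEV engine identifiers and the 0..5 engine indices that
 * PFIFO uses in its status and fault registers.
 */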
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(fifo, engn);
}

static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

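/*
 * Fault recovery: disable the offending channel, mark it KILLED so the
 * next runlist rebuild drops it, and queue the recovery worker, which
 * resets the affected engines (0x002630 presumably being an
 * engine-disable mask) before rebuilding the runlist.
 */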
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

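/*
 * Software-method handling: look up the SW-class object bound to the
 * channel and forward the method to it.  Returns -EINVAL when no such
 * object exists or the call fails, in which case the caller reports
 * the PBDMA error instead.
 */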
static int
gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

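/*
 * On a CTXSW_TIMEOUT scheduling error, scan the per-engine status
 * registers (0x002640 + engn*4) for engines that appear stuck mid
 * context-switch and trigger recovery on the channel being serviced.
 * The individual status bits are largely guesswork, as the unk0/unk1
 * names admit.
 */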
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(fifo, engn)))
				continue;
			gf100_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

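/*
 * MMU fault handler.  The per-unit registers at 0x002800 + unit*0x10
 * report the faulting instance, virtual address and status.  BAR1/BAR3/
 * IFB faults appear to be acknowledged by rewriting their bind
 * registers; engine faults are escalated to channel recovery.
 */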
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

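/*
 * Per-PBDMA interrupt: decode the offending method from 0x0400c0/c4.
 * EMPTY_SUBC (bit 23) may simply be a software-class method, so try
 * gf100_fifo_swmthd() first and only log whatever remains unhandled.
 */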
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x\n",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

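/*
 * Top-level PFIFO interrupt dispatcher: each bit of 0x002100 (masked
 * by 0x002140) selects a sub-handler.  Unknown bits are logged and
 * then masked off so they cannot storm.
 */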
static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

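/*
 * Bit 31 of 0x002140 gates the engine (non-stall) interrupt that feeds
 * nvkm_fifo_uevent(); it is only enabled while there are listeners.
 */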
static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_bar *bar = device->bar;
	struct gf100_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	ret = bar->umap(bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gf100_fifo_intr;
	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
	nv_engine(fifo)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);

	nvkm_fifo_destroy(&fifo->base);
}

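/*
 * Hardware init: enable all units (0x000204/0x002204), count the
 * available PBDMA units, route engines to PBDMAs via 0x002208+ (the
 * per-engine masks below are only applied when at least three PBDMA
 * units exist), reset per-PBDMA interrupt state, and point the BAR1
 * poller (0x002254) at the USERD area.
 */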
static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};