/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

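/* The engine (non-stall) interrupt, bit 31 of the enable mask at 0x002140,
 * is what signals userspace event waiters (see gk104_fifo_intr_engine()
 * below); these two hooks simply gate it on and off.
 */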
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

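/* Rebuild and commit the runlist for one engine.
 *
 * Runlists are double-buffered: the currently-inactive buffer is filled
 * with the IDs of every channel on the engine's channel list, then
 * submitted by writing its base address to 0x002270 and the engine index
 * plus entry count to 0x002274.  Bit 20 of 0x002284 + engine * 8 appears
 * to indicate a pending update; we wait up to two seconds for the
 * hardware to clear it.
 */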
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	nvkm_kmap(cur);
	list_for_each_entry(chan, &engn->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | nr);

	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&subdev->mutex);
}

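/* Map a runlist engine index to the nvkm engine it services, if any. */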
static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 subdevs = gk104_fifo_engine_subdev(engn);
	if (subdevs)
		return nvkm_device_engine(device, __ffs(subdevs));
	return NULL;
}

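/* Worker that finishes channel recovery outside of interrupt context.
 *
 * fifo->mask is a 64-bit mask of subdev indices queued for recovery by
 * gk104_fifo_recover().  Each affected engine is stalled via 0x002630,
 * reset with a fini/init cycle, and has its runlist rebuilt before the
 * stall is released.
 */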
static void
gk104_fifo_recover_work(struct work_struct *work)
{
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gk104_fifo_subdev_engine(engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
	}

	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

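/* Begin recovery of a faulting channel: disable it (bit 11 of its
 * control register at 0x800004 + chid * 8 appears to act as a kill
 * switch), drop it from runlist bookkeeping, and defer the engine
 * reset to gk104_fifo_recover_work().
 */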
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	list_del_init(&chan->head);
	chan->killed = true;

	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

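/* CTXSW_TIMEOUT handling: scan each engine's status word at
 * 0x002640 + engn * 4 and recover the channel stuck mid-context-switch
 * (busy with the chsw flag set).  The status word carries both the
 * outgoing ("prev") and incoming ("next") channel IDs; when a load is
 * in progress the incoming channel is the one blamed.
 */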
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
				if (chan->base.chid == chid) {
					engine = gk104_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gk104_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

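/* Decode and report an MMU fault, then attempt recovery.
 *
 * Faults raised on behalf of BAR1/BAR3/IFB are handled by poking the
 * corresponding bind register (the zero-mask nvkm_mask() calls below
 * are read-modify-write nops whose write-back presumably un-wedges the
 * unit); faults from a real engine kill the offending channel.
 */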
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gk104_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

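/* PBDMA interrupt, first status register (0x040108).
 *
 * DEVICE (bit 23) interrupts can be software methods; if the SW engine
 * handles the method, the bit is suppressed from the error report.  The
 * 0x80600008 write to 0x0400c0 presumably acks the method so the PBDMA
 * can continue past it.
 */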
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

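/* Top-level PFIFO interrupt handler: dispatch each status bit of
 * 0x002100 (filtered by the enable mask at 0x002140) to its handler,
 * acking bits as they are serviced.  Anything left unhandled is
 * reported once and masked off to avoid an interrupt storm.
 */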
void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->fault);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

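/* One-time setup: allocate a pair of runlist buffers per engine
 * (0x8000 bytes holds 4096 eight-byte entries, matching the channel
 * count passed to gk104_fifo_new()), plus the per-channel USERD area
 * (0x200 bytes each), which is mapped through BAR1 for userspace.
 */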
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
		INIT_LIST_HEAD(&fifo->engine[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

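/* Bring the hardware up: enable every PBDMA unit, clear and unmask
 * their interrupts, point the hardware at the USERD area via 0x002254,
 * then clear and enable the top-level PFIFO interrupts.
 */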
void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		nvkm_memory_del(&fifo->engine[i].runlist[1]);
		nvkm_memory_del(&fifo->engine[i].runlist[0]);
	}

	return fifo;
}

int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}