/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

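/* Toggle bit 31 of the PFIFO interrupt enable register (0x002140), which,
 * judging by its use here and in gk104_fifo_intr(), gates the non-stall
 * (engine/uevent) interrupt.
 */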
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

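/* Runlist submission is double-buffered: build the channel list into the
 * next of the two runlist buffers, point the hardware at it (the writes
 * below suggest 0x002270 takes the base address and 0x002274 the runlist
 * index plus entry count), then wait for the update-pending bit to clear.
 */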
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	nvkm_kmap(cur);
	list_for_each_entry(chan, &engn->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | nr);

	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

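/* Map a runlist engine index to the nvkm_engine scheduled on it. */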
static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 subdevs = gk104_fifo_engine_subdev(engn);
	if (subdevs)
		return nvkm_device_engine(device, __ffs(subdevs));
	return NULL;
}

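/* Deferred part of engine recovery, run from process context so that the
 * engine fini/init methods are free to sleep.  The mask of engines needing
 * recovery is snapshotted and cleared under the lock, scheduling is halted
 * for the affected runlists (0x002630 appears to act as a runlist disable
 * mask), each engine is cycled through fini/init and its runlist
 * recommitted, and finally scheduling is re-enabled.
 */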
static void
gk104_fifo_recover_work(struct work_struct *work)
{
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gk104_fifo_subdev_engine(engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
		gk104_fifo_runlist_commit(fifo, gk104_fifo_subdev_engine(engn));
	}

	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

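/* Immediate part of recovery, called from the interrupt path with
 * fifo->base.lock held: set what looks like the kill bit in the channel's
 * control register, drop the channel from its runlist, mark it killed,
 * and schedule the worker above to reset the engine.
 */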
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	list_del_init(&chan->head);
	chan->killed = true;

	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

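/* CTXSW_TIMEOUT recovery: inspect each engine's status register and, where
 * an engine is busy in the middle of a channel switch, recover the channel
 * involved ("next" if a load is in progress, otherwise "prev").
 */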
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
				if (chan->base.chid == chid) {
					engine = gk104_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gk104_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

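/* Decode a single MMU fault from its four-register record (instance block,
 * fault address low/high, status) and log it.  The no-op nvkm_mask()
 * writes appear to re-trigger the relevant bind for BAR/IFB faults;
 * faults attributed to a real engine recover the offending channel.
 */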
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gk104_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

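/* First PBDMA interrupt status register.  A DEVICE interrupt (0x00800000)
 * may carry a software method, which is handed to the SW engine and, if
 * consumed there, suppressed from the error message; anything left is
 * logged and acknowledged.
 */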
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

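/* Second (HCE) PBDMA interrupt status register: log and acknowledge. */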
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

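/* Runlist update completion: acknowledge each pending runlist and wake any
 * waiter sleeping in gk104_fifo_runlist_commit().
 */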
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

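/* Top-level PFIFO interrupt dispatch.  Each recognised bit is handled and
 * then acknowledged in 0x002100; any bit left unhandled is logged once and
 * masked off so it cannot storm.
 */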
void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->fault);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

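/* One-time setup: allocate the pair of runlist buffers for each engine and
 * the USERD region (0x200 bytes per channel), mapping the latter through
 * the BAR so channel state is reachable from user mappings.
 */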
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
		INIT_LIST_HEAD(&fifo->engine[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

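/* Chip initialisation: enable every PBDMA unit present, clear and unmask
 * their interrupts, point the hardware at the USERD BAR offset (0x002254),
 * and unmask the top-level PFIFO interrupts, leaving the non-stall bit for
 * gk104_fifo_uevent_init().
 */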
void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		nvkm_memory_del(&fifo->engine[i].runlist[1]);
		nvkm_memory_del(&fifo->engine[i].runlist[0]);
	}

	return fifo;
}

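/* Common constructor: variants of this FIFO supply their own
 * nvkm_fifo_func through gk104_fifo_new_().
 */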
int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}