/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

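/*
 * Bit 31 of PFIFO_INTR_EN (0x002140) appears to gate the non-stall
 * (engine) interrupt used for uevent delivery; the init/fini pair
 * below simply toggles that single bit.
 */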
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

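/*
 * Runlists are double-buffered: the channel list is rebuilt in the
 * inactive buffer, the buffers are flipped, and the hardware is pointed
 * at the new copy via 0x002270/0x002274, after which we wait for the
 * pending bit (0x00100000 in 0x002284 + runl * 8) to clear.  A typical
 * (hypothetical) caller would do:
 *
 *	gk104_fifo_runlist_insert(fifo, chan);
 *	gk104_fifo_runlist_commit(fifo, chan->runl);
 */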
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *mem;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(mem);

	if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
		target = 0;
	else
		target = 3;

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (wait_event_timeout(fifo->runlist[runl].wait,
			       !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
				       & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

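/*
 * Engine recovery: the interrupt handlers accumulate a mask of faulting
 * engines under fifo->base.lock, and this work item then stalls
 * scheduling for those engines (0x002630), resets each one by cycling
 * its subdev, resubmits the affected runlists, and re-enables
 * scheduling.  The 0x00262c write presumably acks the stalled engines.
 */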
static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->recover.mask;
	fifo->recover.mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	/* todo is 64-bit, so bits must be cleared with a 64-bit mask. */
	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
		engm |= 1 << gk104_fifo_subdev_engine(engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
		gk104_fifo_runlist_commit(fifo, gk104_fifo_subdev_engine(engn));
	}

	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		   struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	list_del_init(&chan->head);
	chan->killed = true;

	fifo->recover.mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

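/*
 * On a CTXSW_TIMEOUT, scan the per-engine status registers
 * (0x002640 + engn * 8) for an engine that is busy with a channel
 * switch pending, and recover the channel it is stuck on.  The chid
 * comes from the "next" field while a new context is loading, and
 * from "prev" otherwise.
 */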
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct gk104_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct nvkm_engine *engine = fifo->engine[engn].engine;
		int runl = fifo->engine[engn].runl;
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x0fff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x00000fff);
		u32 chid = load ? next : prev;
		(void)save;

		if (!busy || !chsw)
			continue;

		list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
			if (chan->base.chid == chid && engine) {
				gk104_fifo_recover(fifo, engine, chan);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

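/*
 * Decode an MMU fault from the per-unit fault info registers
 * (0x002800 + unit * 0x10: instance, vaddr lo/hi, status), look up the
 * instance block to identify the channel, and recover it if the fault
 * came from an engine we know how to reset.  The BAR/IFB cases appear
 * to re-trigger the relevant flush engines by poking their bind
 * registers.
 */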
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu && eu->data2) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gk104_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

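/*
 * PBDMA interrupt handling.  DEVICE interrupts (bit 23) carry methods
 * aimed at the software "engine"; if nvkm_sw_mthd() handles one, that
 * bit is suppressed from the error message before the status is acked.
 */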
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

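/*
 * Top-level PFIFO interrupt dispatch.  Each handled bit in 0x002100 is
 * acked by writing it back, then cleared from the local status; any
 * leftover bits are reported and masked off in 0x002140 so an unknown
 * source cannot wedge the machine with an interrupt storm.
 */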
void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

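/*
 * One-time setup: count the PBDMA units, then walk the 64 PTOP device
 * info entries (0x022700 + i * 4) to discover which engines exist,
 * which runlist and PBDMA serve them, and their fault/interrupt IDs.
 * Entries chain while bit 31 is set; DATA, ENUM and ENGINE_TYPE words
 * are distinguished by their low two bits.
 */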
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;
	u32 *map;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Read device topology from HW. */
	for (i = 0; i < 64; i++) {
		int type = -1, pbid = -1, engidx = -1;
		int engn = -1, runl = -1, intr = -1, mcen = -1;
		int fault = -1, j;
		u32 data, addr = 0;

		do {
			data = nvkm_rd32(device, 0x022700 + (i * 0x04));
			nvkm_trace(subdev, "%02x: %08x\n", i, data);
			switch (data & 0x00000003) {
			case 0x00000000: /* NOT_VALID */
				continue;
			case 0x00000001: /* DATA */
				addr  = (data & 0x00fff000);
				fault = (data & 0x000000f8) >> 3;
				break;
			case 0x00000002: /* ENUM */
				if (data & 0x00000020)
					engn = (data & 0x3c000000) >> 26;
				if (data & 0x00000010)
					runl = (data & 0x01e00000) >> 21;
				if (data & 0x00000008)
					intr = (data & 0x000f8000) >> 15;
				if (data & 0x00000004)
					mcen = (data & 0x00003e00) >> 9;
				break;
			case 0x00000003: /* ENGINE_TYPE */
				type = (data & 0x7ffffffc) >> 2;
				break;
			}
		} while ((data & 0x80000000) && ++i < 64);

		if (!data)
			continue;

		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0; runl >= 0 && j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		/* Translate engine type to NVKM engine identifier. */
		switch (type) {
		case 0x00000000: engidx = NVKM_ENGINE_GR; break;
		case 0x00000001: engidx = NVKM_ENGINE_CE0; break;
		case 0x00000002: engidx = NVKM_ENGINE_CE1; break;
		case 0x00000003: engidx = NVKM_ENGINE_CE2; break;
		case 0x00000008: engidx = NVKM_ENGINE_MSPDEC; break;
		case 0x00000009: engidx = NVKM_ENGINE_MSPPP; break;
		case 0x0000000a: engidx = NVKM_ENGINE_MSVLD; break;
		case 0x0000000b: engidx = NVKM_ENGINE_MSENC; break;
		case 0x0000000c: engidx = NVKM_ENGINE_VIC; break;
		case 0x0000000d: engidx = NVKM_ENGINE_SEC; break;
		default:
			break;
		}

		nvkm_debug(subdev, "%02x (%8s): engine %2d runlist %2d "
				   "pbdma %2d intr %2d reset %2d "
				   "fault %2d addr %06x\n", type,
			   engidx < 0 ? NULL : nvkm_subdev_name[engidx],
			   engn, runl, pbid, intr, mcen, fault, addr);

		/* Mark the engine as supported if everything checks out. */
		if (engn >= 0 && runl >= 0) {
			fifo->engine[engn].engine = engidx < 0 ? NULL :
				nvkm_device_engine(device, engidx);
			fifo->engine[engn].runl = runl;
			fifo->engine[engn].pbid = pbid;
			fifo->engine_nr = max(fifo->engine_nr, engn + 1);
			fifo->runlist[runl].engm |= 1 << engn;
			fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
		}
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->runlist[i].mem[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->runlist[i].mem[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

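/*
 * Runtime init: enable every discovered PBDMA unit, clear and unmask
 * their interrupts, point the hardware at the USERD area in BAR1
 * (0x002254), then clear and enable the top-level PFIFO interrupts.
 */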
void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_del(&fifo->runlist[i].mem[1]);
		nvkm_memory_del(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

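/*
 * Common constructor, shared (presumably) with the other chip-specific
 * variants in this directory that build on the gk104 implementation.
 */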
int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}