1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "nv50.h"
25 
26 #include <core/client.h>
27 #include <subdev/timer.h>
28 #include <engine/fifo.h>
29 
30 static u64
31 nv50_gr_units(struct nvkm_gr *gr)
32 {
33 	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
34 }
35 
36 /*******************************************************************************
37  * Graphics object classes
38  ******************************************************************************/
39 
40 static int
41 nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
42 		    int align, struct nvkm_gpuobj **pgpuobj)
43 {
44 	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
45 				  align, false, parent, pgpuobj);
46 	if (ret == 0) {
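		/* Each user object gets a 16-byte instance: its class in the
		 * first dword, the rest cleared.
		 */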
47 		nvkm_kmap(*pgpuobj);
48 		nvkm_wo32(*pgpuobj, 0x00, object->oclass_name);
49 		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
50 		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
51 		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
52 		nvkm_done(*pgpuobj);
53 	}
54 	return ret;
55 }
56 
57 static const struct nvkm_object_func
58 nv50_gr_object = {
59 	.bind = nv50_gr_object_bind,
60 };
61 
62 static int
63 nv50_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
64 {
65 	struct nv50_gr *gr = nv50_gr(base);
66 	int c = 0;
67 
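	/* Walk the per-chipset class list; if index is out of range, return
	 * the number of classes instead.
	 */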
68 	while (gr->func->sclass[c].oclass) {
69 		if (c++ == index) {
70 			*sclass = gr->func->sclass[index];
71 			return index;
72 		}
73 	}
74 
75 	return c;
76 }
77 
78 /*******************************************************************************
79  * PGRAPH context
80  ******************************************************************************/
81 
82 static int
83 nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
84 		  int align, struct nvkm_gpuobj **pgpuobj)
85 {
86 	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
87 	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
88 				  align, true, parent, pgpuobj);
89 	if (ret == 0) {
90 		nvkm_kmap(*pgpuobj);
91 		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
92 		nvkm_done(*pgpuobj);
93 	}
94 	return ret;
95 }
96 
97 static const struct nvkm_object_func
98 nv50_gr_chan = {
99 	.bind = nv50_gr_chan_bind,
100 };
101 
102 static int
103 nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
104 		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
105 {
106 	struct nv50_gr *gr = nv50_gr(base);
107 	struct nv50_gr_chan *chan;
108 
109 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
110 		return -ENOMEM;
111 	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
112 	chan->gr = gr;
113 	*pobject = &chan->object;
114 	return 0;
115 }
116 
117 /*******************************************************************************
118  * PGRAPH engine/subdev functions
119  ******************************************************************************/
120 
121 static const struct nvkm_bitfield nv50_gr_status[] = {
122 	{ 0x00000001, "BUSY" }, /* set when any bit is set */
123 	{ 0x00000002, "DISPATCH" },
124 	{ 0x00000004, "UNK2" },
125 	{ 0x00000008, "UNK3" },
126 	{ 0x00000010, "UNK4" },
127 	{ 0x00000020, "UNK5" },
128 	{ 0x00000040, "M2MF" },
129 	{ 0x00000080, "UNK7" },
130 	{ 0x00000100, "CTXPROG" },
131 	{ 0x00000200, "VFETCH" },
132 	{ 0x00000400, "CCACHE_PREGEOM" },
133 	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
134 	{ 0x00001000, "VCLIP" },
135 	{ 0x00002000, "RATTR_APLANE" },
136 	{ 0x00004000, "TRAST" },
137 	{ 0x00008000, "CLIPID" },
138 	{ 0x00010000, "ZCULL" },
139 	{ 0x00020000, "ENG2D" },
140 	{ 0x00040000, "RMASK" },
141 	{ 0x00080000, "TPC_RAST" },
142 	{ 0x00100000, "TPC_PROP" },
143 	{ 0x00200000, "TPC_TEX" },
144 	{ 0x00400000, "TPC_GEOM" },
145 	{ 0x00800000, "TPC_MP" },
146 	{ 0x01000000, "ROP" },
147 	{}
148 };
149 
150 static const struct nvkm_bitfield
151 nv50_gr_vstatus_0[] = {
152 	{ 0x01, "VFETCH" },
153 	{ 0x02, "CCACHE" },
154 	{ 0x04, "PREGEOM" },
155 	{ 0x08, "POSTGEOM" },
156 	{ 0x10, "VATTR" },
157 	{ 0x20, "STRMOUT" },
158 	{ 0x40, "VCLIP" },
159 	{}
160 };
161 
162 static const struct nvkm_bitfield
163 nv50_gr_vstatus_1[] = {
164 	{ 0x01, "TPC_RAST" },
165 	{ 0x02, "TPC_PROP" },
166 	{ 0x04, "TPC_TEX" },
167 	{ 0x08, "TPC_GEOM" },
168 	{ 0x10, "TPC_MP" },
169 	{}
170 };
171 
172 static const struct nvkm_bitfield
173 nv50_gr_vstatus_2[] = {
174 	{ 0x01, "RATTR" },
175 	{ 0x02, "APLANE" },
176 	{ 0x04, "TRAST" },
177 	{ 0x08, "CLIPID" },
178 	{ 0x10, "ZCULL" },
179 	{ 0x20, "ENG2D" },
180 	{ 0x40, "RMASK" },
181 	{ 0x80, "ROP" },
182 	{}
183 };
184 
185 static void
186 nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
187 		      const struct nvkm_bitfield *units, u32 status)
188 {
189 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
190 	u32 stat = status;
191 	u8  mask = 0x00;
192 	char msg[64];
193 	int i;
194 
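	/* VSTATUS packs three bits per unit; a value of 1 appears to mean
	 * "busy", so fold it down to one bit per unit for nvkm_snprintbf().
	 */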
195 	for (i = 0; units[i].name && status; i++) {
196 		if ((status & 7) == 1)
197 			mask |= (1 << i);
198 		status >>= 3;
199 	}
200 
201 	nvkm_snprintbf(msg, sizeof(msg), units, mask);
202 	nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
203 }
204 
205 static int
206 g84_gr_tlb_flush(struct nvkm_engine *engine)
207 {
208 	struct nv50_gr *gr = (void *)engine;
209 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
210 	struct nvkm_device *device = subdev->device;
211 	struct nvkm_timer *tmr = device->timer;
212 	bool idle, timeout = false;
213 	unsigned long flags;
214 	char status[128];
215 	u64 start;
216 	u32 tmp;
217 
218 	spin_lock_irqsave(&gr->lock, flags);
219 	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
220 
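	/* Wait (up to two seconds) for the VSTATUS registers to report all
	 * units idle before flushing.
	 */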
221 	start = nvkm_timer_read(tmr);
222 	do {
223 		idle = true;
224 
225 		for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
226 			if ((tmp & 7) == 1)
227 				idle = false;
228 		}
229 
230 		for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
231 			if ((tmp & 7) == 1)
232 				idle = false;
233 		}
234 
235 		for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
236 			if ((tmp & 7) == 1)
237 				idle = false;
238 		}
239 	} while (!idle &&
240 		 !(timeout = nvkm_timer_read(tmr) - start > 2000000000));
241 
242 	if (timeout) {
243 		nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");
244 
245 		tmp = nvkm_rd32(device, 0x400700);
246 		nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
247 		nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);
248 
249 		nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
250 				       nvkm_rd32(device, 0x400380));
251 		nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
252 				       nvkm_rd32(device, 0x400384));
253 		nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
254 				       nvkm_rd32(device, 0x400388));
255 	}
256 
257 
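	/* Trigger the flush via 0x100c80 and poll for completion, then
	 * restore 0x400500 bit 0 and drop the lock.
	 */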
258 	nvkm_wr32(device, 0x100c80, 0x00000001);
259 	nvkm_msec(device, 2000,
260 		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
261 			break;
262 	);
263 	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
264 	spin_unlock_irqrestore(&gr->lock, flags);
265 	return timeout ? -EBUSY : 0;
266 }
267 
268 static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
269 	{ 0x01, "STACK_UNDERFLOW" },
270 	{ 0x02, "STACK_MISMATCH" },
271 	{ 0x04, "QUADON_ACTIVE" },
272 	{ 0x08, "TIMEOUT" },
273 	{ 0x10, "INVALID_OPCODE" },
274 	{ 0x20, "PM_OVERFLOW" },
275 	{ 0x40, "BREAKPOINT" },
276 	{}
277 };
278 
279 static const struct nvkm_bitfield nv50_mpc_traps[] = {
280 	{ 0x0000001, "LOCAL_LIMIT_READ" },
281 	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
282 	{ 0x0000040, "STACK_LIMIT" },
283 	{ 0x0000100, "GLOBAL_LIMIT_READ" },
284 	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
285 	{ 0x0010000, "MP0" },
286 	{ 0x0020000, "MP1" },
287 	{ 0x0040000, "GLOBAL_LIMIT_RED" },
288 	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
289 	{ 0x4000000, "MP2" },
290 	{}
291 };
292 
293 static const struct nvkm_bitfield nv50_tex_traps[] = {
294 	{ 0x00000001, "" }, /* any bit set? */
295 	{ 0x00000002, "FAULT" },
296 	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
297 	{ 0x00000008, "LINEAR_MISMATCH" },
298 	{ 0x00000020, "WRONG_MEMTYPE" },
299 	{}
300 };
301 
302 static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
303 	{ 0x00000001, "NOTIFY" },
304 	{ 0x00000002, "IN" },
305 	{ 0x00000004, "OUT" },
306 	{}
307 };
308 
309 static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
310 	{ 0x00000001, "FAULT" },
311 	{}
312 };
313 
314 static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
315 	{ 0x00000001, "FAULT" },
316 	{}
317 };
318 
319 static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
320 	{ 0x00000001, "FAULT" },
321 	{}
322 };
323 
324 /* There must be a *lot* of these. Will take some time to gather them up. */
325 const struct nvkm_enum nv50_data_error_names[] = {
326 	{ 0x00000003, "INVALID_OPERATION", NULL },
327 	{ 0x00000004, "INVALID_VALUE", NULL },
328 	{ 0x00000005, "INVALID_ENUM", NULL },
329 	{ 0x00000008, "INVALID_OBJECT", NULL },
330 	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
331 	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
332 	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
333 	{ 0x0000000c, "INVALID_BITFIELD", NULL },
334 	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
335 	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
336 	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
337 	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
338 	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
339 	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
340 	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
341 	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
342 	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
343 	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
344 	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
345 	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
346 	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
347 	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
348 	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
349 	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
350 	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
351 	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
352 	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
353 	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
354 	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
355 	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
356 	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
357 	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
358 	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
359 	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
360 	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
361 	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
362 	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
363 	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
364 	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
365 	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
366 	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
367 	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
368 	{}
369 };
370 
371 static const struct nvkm_bitfield nv50_gr_intr_name[] = {
372 	{ 0x00000001, "NOTIFY" },
373 	{ 0x00000002, "COMPUTE_QUERY" },
374 	{ 0x00000010, "ILLEGAL_MTHD" },
375 	{ 0x00000020, "ILLEGAL_CLASS" },
376 	{ 0x00000040, "DOUBLE_NOTIFY" },
377 	{ 0x00001000, "CONTEXT_SWITCH" },
378 	{ 0x00010000, "BUFFER_NOTIFY" },
379 	{ 0x00100000, "DATA_ERROR" },
380 	{ 0x00200000, "TRAP" },
381 	{ 0x01000000, "SINGLE_STEP" },
382 	{}
383 };
384 
385 static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
386 	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
387 	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
388 	{ 0x00000010, "DST2D_FAULT" },
389 	{ 0x00000020, "ZETA_FAULT" },
390 	{ 0x00000040, "RT_FAULT" },
391 	{ 0x00000080, "CUDA_FAULT" },
392 	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
393 	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
394 	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
395 	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
396 	{ 0x00001000, "RT_LINEAR_MISMATCH" },
397 	{}
398 };
399 
400 static void
401 nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
402 {
403 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
404 	struct nvkm_device *device = subdev->device;
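	/* Fault information registers; the names match the low bits of their
	 * pre-NVA0 addresses (0x408e0c, 0x408e10, ...).
	 */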
405 	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
406 	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
407 	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
408 	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
409 	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
410 	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
411 	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
412 	char msg[128];
413 
414 	/* CUDA memory: l[], g[] or stack. */
415 	if (ustatus & 0x00000080) {
416 		if (e18 & 0x80000000) {
417 			/* g[] read fault? */
418 			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
419 					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
420 			e18 &= ~0x1f000000;
421 		} else if (e18 & 0xc) {
422 			/* g[] write fault? */
423 			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
424 				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
425 			e18 &= ~0x00000f80;
426 		} else {
427 			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
428 				 tp, e14, e10);
429 		}
430 		ustatus &= ~0x00000080;
431 	}
432 	if (ustatus) {
433 		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
434 		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
435 				   "Address %02x%08x\n",
436 			   tp, ustatus, msg, e14, e10);
437 	}
438 	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
439 		 tp, e0c, e18, e1c, e20, e24);
440 }
441 
442 static void
443 nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
444 {
445 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
446 	struct nvkm_device *device = subdev->device;
447 	u32 units = nvkm_rd32(device, 0x1540);
448 	u32 addr, mp10, status, pc, oplow, ophigh;
449 	char msg[128];
450 	int i;
451 	int mps = 0;
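	/* Bits 24+ of 0x001540 select the MPs within a TP; the per-MP
	 * registers sit at a 4KiB per-TP stride (0x408200) before NVA0 and
	 * a 2KiB stride (0x408100) from NVA0 on, with 0x80 per MP.
	 */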
452 	for (i = 0; i < 4; i++) {
453 		if (!(units & 1 << (i+24)))
454 			continue;
455 		if (nv_device(gr)->chipset < 0xa0)
456 			addr = 0x408200 + (tpid << 12) + (i << 7);
457 		else
458 			addr = 0x408100 + (tpid << 11) + (i << 7);
459 		mp10 = nvkm_rd32(device, addr + 0x10);
460 		status = nvkm_rd32(device, addr + 0x14);
461 		if (!status)
462 			continue;
463 		if (display) {
464 			nvkm_rd32(device, addr + 0x20);
465 			pc = nvkm_rd32(device, addr + 0x24);
466 			oplow = nvkm_rd32(device, addr + 0x70);
467 			ophigh = nvkm_rd32(device, addr + 0x74);
468 			nvkm_snprintbf(msg, sizeof(msg),
469 				       nv50_mp_exec_errors, status);
470 			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
471 					   "%08x [%s] at %06x warp %d, "
472 					   "opcode %08x %08x\n",
473 				   tpid, i, status, msg, pc & 0xffffff,
474 				   pc >> 24, oplow, ophigh);
475 		}
476 		nvkm_wr32(device, addr + 0x10, mp10);
477 		nvkm_wr32(device, addr + 0x14, 0);
478 		mps++;
479 	}
480 	if (!mps && display)
481 		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
482 				"No MPs claiming errors?\n", tpid);
483 }
484 
485 static void
486 nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
487 		  u32 ustatus_new, int display, const char *name)
488 {
489 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
490 	struct nvkm_device *device = subdev->device;
491 	u32 units = nvkm_rd32(device, 0x1540);
492 	int tps = 0;
493 	int i, r;
494 	char msg[128];
495 	u32 ustatus_addr, ustatus;
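	/* Check the ustatus register of every enabled TP (4KiB per-TP stride
	 * before NVA0, 2KiB from NVA0 on) and decode it by trap type.
	 */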
496 	for (i = 0; i < 16; i++) {
497 		if (!(units & (1 << i)))
498 			continue;
499 		if (nv_device(gr)->chipset < 0xa0)
500 			ustatus_addr = ustatus_old + (i << 12);
501 		else
502 			ustatus_addr = ustatus_new + (i << 11);
503 		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
504 		if (!ustatus)
505 			continue;
506 		tps++;
507 		switch (type) {
508 		case 6: /* texture error... unknown for now */
509 			if (display) {
510 				nvkm_error(subdev, "magic set %d:\n", i);
511 				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
512 					nvkm_error(subdev, "\t%08x: %08x\n", r,
513 						   nvkm_rd32(device, r));
514 				if (ustatus) {
515 					nvkm_snprintbf(msg, sizeof(msg),
516 						       nv50_tex_traps, ustatus);
517 					nvkm_error(subdev,
518 						   "%s - TP%d: %08x [%s]\n",
519 						   name, i, ustatus, msg);
520 					ustatus = 0;
521 				}
522 			}
523 			break;
524 		case 7: /* MP error */
525 			if (ustatus & 0x04030000) {
526 				nv50_gr_mp_trap(gr, i, display);
527 				ustatus &= ~0x04030000;
528 			}
529 			if (ustatus && display) {
530 				nvkm_snprintbf(msg, sizeof(msg),
531 					       nv50_mpc_traps, ustatus);
532 				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
533 					   name, i, ustatus, msg);
534 				ustatus = 0;
535 			}
536 			break;
537 		case 8: /* PROP error */
538 			if (display)
539 				nv50_gr_prop_trap(
540 						gr, ustatus_addr, ustatus, i);
541 			ustatus = 0;
542 			break;
543 		}
544 		if (ustatus) {
545 			if (display)
546 				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
547 		}
548 		nvkm_wr32(device, ustatus_addr, 0xc0000000);
549 	}
550 
551 	if (!tps && display)
552 		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
553 }
554 
555 static int
556 nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
557 		     int chid, u64 inst, const char *name)
558 {
559 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
560 	struct nvkm_device *device = subdev->device;
561 	u32 status = nvkm_rd32(device, 0x400108);
562 	u32 ustatus;
563 	char msg[128];
564 
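	/* 0x400108 reports one bit per PGRAPH unit with a pending trap;
	 * decode, log and acknowledge each unit in turn.
	 */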
565 	if (!status && display) {
566 		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
567 		return 1;
568 	}
569 
570 	/* DISPATCH: Relays commands to other units and handles NOTIFY,
571 	 * COND, QUERY. If you get a trap from it, the command is still stuck
572 	 * in DISPATCH and you need to do something about it. */
573 	if (status & 0x001) {
574 		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
575 		if (!ustatus && display) {
576 			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
577 		}
578 
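		/* 0x400500 controls PGRAPH's command fetching; disable it
		 * while the stuck command is inspected (it is re-enabled
		 * from the interrupt handler).
		 */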
579 		nvkm_wr32(device, 0x400500, 0x00000000);
580 
581 		/* Known to be triggered by screwed up NOTIFY and COND... */
582 		if (ustatus & 0x00000001) {
583 			u32 addr = nvkm_rd32(device, 0x400808);
584 			u32 subc = (addr & 0x00070000) >> 16;
585 			u32 mthd = (addr & 0x00001ffc);
586 			u32 datal = nvkm_rd32(device, 0x40080c);
587 			u32 datah = nvkm_rd32(device, 0x400810);
588 			u32 class = nvkm_rd32(device, 0x400814);
589 			u32 r848 = nvkm_rd32(device, 0x400848);
590 
591 			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
592 			if (display && (addr & 0x80000000)) {
593 				nvkm_error(subdev,
594 					   "ch %d [%010llx %s] subc %d "
595 					   "class %04x mthd %04x data %08x%08x "
596 					   "400808 %08x 400848 %08x\n",
597 					   chid, inst, name, subc, class, mthd,
598 					   datah, datal, addr, r848);
599 			} else
600 			if (display) {
601 				nvkm_error(subdev, "no stuck command?\n");
602 			}
603 
604 			nvkm_wr32(device, 0x400808, 0);
605 			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
606 			nvkm_wr32(device, 0x400848, 0);
607 			ustatus &= ~0x00000001;
608 		}
609 
610 		if (ustatus & 0x00000002) {
611 			u32 addr = nvkm_rd32(device, 0x40084c);
612 			u32 subc = (addr & 0x00070000) >> 16;
613 			u32 mthd = (addr & 0x00001ffc);
614 			u32 data = nvkm_rd32(device, 0x40085c);
615 			u32 class = nvkm_rd32(device, 0x400814);
616 
617 			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
618 			if (display && (addr & 0x80000000)) {
619 				nvkm_error(subdev,
620 					   "ch %d [%010llx %s] subc %d "
621 					   "class %04x mthd %04x data %08x "
622 					   "40084c %08x\n", chid, inst, name,
623 					   subc, class, mthd, data, addr);
624 			} else
625 			if (display) {
626 				nvkm_error(subdev, "no stuck command?\n");
627 			}
628 
629 			nvkm_wr32(device, 0x40084c, 0);
630 			ustatus &= ~0x00000002;
631 		}
632 
633 		if (ustatus && display) {
634 			nvkm_error(subdev, "TRAP_DISPATCH "
635 					   "(unknown %08x)\n", ustatus);
636 		}
637 
638 		nvkm_wr32(device, 0x400804, 0xc0000000);
639 		nvkm_wr32(device, 0x400108, 0x001);
640 		status &= ~0x001;
641 		if (!status)
642 			return 0;
643 	}
644 
645 	/* M2MF: Memory to memory copy engine. */
646 	if (status & 0x002) {
647 		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
648 		if (display) {
649 			nvkm_snprintbf(msg, sizeof(msg),
650 				       nv50_gr_trap_m2mf, ustatus);
651 			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
652 				   ustatus, msg);
653 			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
654 				   nvkm_rd32(device, 0x406804),
655 				   nvkm_rd32(device, 0x406808),
656 				   nvkm_rd32(device, 0x40680c),
657 				   nvkm_rd32(device, 0x406810));
658 		}
659 
660 		/* No sane way found yet -- just reset the bugger. */
661 		nvkm_wr32(device, 0x400040, 2);
662 		nvkm_wr32(device, 0x400040, 0);
663 		nvkm_wr32(device, 0x406800, 0xc0000000);
664 		nvkm_wr32(device, 0x400108, 0x002);
665 		status &= ~0x002;
666 	}
667 
668 	/* VFETCH: Fetches data from vertex buffers. */
669 	if (status & 0x004) {
670 		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
671 		if (display) {
672 			nvkm_snprintbf(msg, sizeof(msg),
673 				       nv50_gr_trap_vfetch, ustatus);
674 			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
675 				   ustatus, msg);
676 			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
677 				   nvkm_rd32(device, 0x400c00),
678 				   nvkm_rd32(device, 0x400c08),
679 				   nvkm_rd32(device, 0x400c0c),
680 				   nvkm_rd32(device, 0x400c10));
681 		}
682 
683 		nvkm_wr32(device, 0x400c04, 0xc0000000);
684 		nvkm_wr32(device, 0x400108, 0x004);
685 		status &= ~0x004;
686 	}
687 
688 	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
689 	if (status & 0x008) {
690 		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
691 		if (display) {
692 			nvkm_snprintbf(msg, sizeof(msg),
693 				       nv50_gr_trap_strmout, ustatus);
694 			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
695 				   ustatus, msg);
696 			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
697 				   nvkm_rd32(device, 0x401804),
698 				   nvkm_rd32(device, 0x401808),
699 				   nvkm_rd32(device, 0x40180c),
700 				   nvkm_rd32(device, 0x401810));
701 		}
702 
703 		/* No sane way found yet -- just reset the bugger. */
704 		nvkm_wr32(device, 0x400040, 0x80);
705 		nvkm_wr32(device, 0x400040, 0);
706 		nvkm_wr32(device, 0x401800, 0xc0000000);
707 		nvkm_wr32(device, 0x400108, 0x008);
708 		status &= ~0x008;
709 	}
710 
711 	/* CCACHE: Handles code and c[] caches and fills them. */
712 	if (status & 0x010) {
713 		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
714 		if (display) {
715 			nvkm_snprintbf(msg, sizeof(msg),
716 				       nv50_gr_trap_ccache, ustatus);
717 			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
718 				   ustatus, msg);
719 			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
720 					   "%08x %08x %08x\n",
721 				   nvkm_rd32(device, 0x405000),
722 				   nvkm_rd32(device, 0x405004),
723 				   nvkm_rd32(device, 0x405008),
724 				   nvkm_rd32(device, 0x40500c),
725 				   nvkm_rd32(device, 0x405010),
726 				   nvkm_rd32(device, 0x405014),
727 				   nvkm_rd32(device, 0x40501c));
728 		}
729 
730 		nvkm_wr32(device, 0x405018, 0xc0000000);
731 		nvkm_wr32(device, 0x400108, 0x010);
732 		status &= ~0x010;
733 	}
734 
735 	/* Unknown, not seen yet... 0x402000 is the only trap status reg
736 	 * remaining, so try to handle it anyway. Perhaps related to that
737 	 * unknown DMA slot on tesla? */
738 	if (status & 0x20) {
739 		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
740 		if (display)
741 			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
742 		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
744 	}
745 
746 	/* TEXTURE: CUDA texturing units */
747 	if (status & 0x040) {
748 		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
749 				    "TRAP_TEXTURE");
750 		nvkm_wr32(device, 0x400108, 0x040);
751 		status &= ~0x040;
752 	}
753 
754 	/* MP: CUDA execution engines. */
755 	if (status & 0x080) {
756 		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
757 				    "TRAP_MP");
758 		nvkm_wr32(device, 0x400108, 0x080);
759 		status &= ~0x080;
760 	}
761 
762 	/* PROP:  Handles TP-initiated uncached memory accesses:
763 	 * l[], g[], stack, 2d surfaces, render targets. */
764 	if (status & 0x100) {
765 		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
766 				    "TRAP_PROP");
767 		nvkm_wr32(device, 0x400108, 0x100);
768 		status &= ~0x100;
769 	}
770 
771 	if (status) {
772 		if (display)
773 			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
774 		nvkm_wr32(device, 0x400108, status);
775 	}
776 
777 	return 1;
778 }
779 
780 static void
781 nv50_gr_intr(struct nvkm_subdev *subdev)
782 {
783 	struct nv50_gr *gr = (void *)subdev;
784 	struct nvkm_device *device = gr->base.engine.subdev.device;
785 	struct nvkm_fifo_chan *chan;
786 	u32 stat = nvkm_rd32(device, 0x400100);
787 	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
788 	u32 addr = nvkm_rd32(device, 0x400704);
789 	u32 subc = (addr & 0x00070000) >> 16;
790 	u32 mthd = (addr & 0x00001ffc);
791 	u32 data = nvkm_rd32(device, 0x400708);
792 	u32 class = nvkm_rd32(device, 0x400814);
793 	u32 show = stat, show_bitfield = stat;
794 	const struct nvkm_enum *en;
795 	unsigned long flags;
796 	const char *name = "unknown";
797 	char msg[128];
798 	int chid = -1;
799 
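	/* 0x40032c holds the current context's instance address (in 4KiB
	 * units); use it to look up the owning channel for logging.
	 */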
800 	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
801 	if (chan)  {
802 		name = chan->object.client->name;
803 		chid = chan->chid;
804 	}
805 
806 	if (show & 0x00100000) {
807 		u32 ecode = nvkm_rd32(device, 0x400110);
808 		en = nvkm_enum_find(nv50_data_error_names, ecode);
809 		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
810 			   ecode, en ? en->name : "");
811 		show_bitfield &= ~0x00100000;
812 	}
813 
814 	if (stat & 0x00200000) {
815 		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
816 			show &= ~0x00200000;
817 		show_bitfield &= ~0x00200000;
818 	}
819 
820 	nvkm_wr32(device, 0x400100, stat);
821 	nvkm_wr32(device, 0x400500, 0x00010001);
822 
823 	if (show) {
824 		show &= show_bitfield;
825 		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
826 		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
827 				   "class %04x mthd %04x data %08x\n",
828 			   stat, msg, chid, (u64)inst << 12, name,
829 			   subc, class, mthd, data);
830 	}
831 
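	/* Bit 31 of 0x400824 appears to be raised by the context program;
	 * clear it so it can be signalled again.
	 */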
832 	if (nvkm_rd32(device, 0x400824) & (1 << 31))
833 		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));
834 
835 	nvkm_fifo_chan_put(device->fifo, flags, &chan);
836 }
837 
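/* Class lists per chipset: NULL (0x0030), 2D (0x502d), M2MF (0x5039), plus
 * the generation's 3D and compute classes.
 */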
838 static const struct nv50_gr_func
839 nv50_gr = {
840 	.sclass = {
841 		{ -1, -1, 0x0030, &nv50_gr_object },
842 		{ -1, -1, 0x502d, &nv50_gr_object },
843 		{ -1, -1, 0x5039, &nv50_gr_object },
844 		{ -1, -1, 0x5097, &nv50_gr_object },
845 		{ -1, -1, 0x50c0, &nv50_gr_object },
846 		{}
847 	}
848 };
849 
850 static const struct nv50_gr_func
851 g84_gr = {
852 	.sclass = {
853 		{ -1, -1, 0x0030, &nv50_gr_object },
854 		{ -1, -1, 0x502d, &nv50_gr_object },
855 		{ -1, -1, 0x5039, &nv50_gr_object },
856 		{ -1, -1, 0x50c0, &nv50_gr_object },
857 		{ -1, -1, 0x8297, &nv50_gr_object },
858 		{}
859 	}
860 };
861 
862 static const struct nv50_gr_func
863 gt200_gr = {
864 	.sclass = {
865 		{ -1, -1, 0x0030, &nv50_gr_object },
866 		{ -1, -1, 0x502d, &nv50_gr_object },
867 		{ -1, -1, 0x5039, &nv50_gr_object },
868 		{ -1, -1, 0x50c0, &nv50_gr_object },
869 		{ -1, -1, 0x8397, &nv50_gr_object },
870 		{}
871 	}
872 };
873 
874 static const struct nv50_gr_func
875 gt215_gr = {
876 	.sclass = {
877 		{ -1, -1, 0x0030, &nv50_gr_object },
878 		{ -1, -1, 0x502d, &nv50_gr_object },
879 		{ -1, -1, 0x5039, &nv50_gr_object },
880 		{ -1, -1, 0x50c0, &nv50_gr_object },
881 		{ -1, -1, 0x8597, &nv50_gr_object },
882 		{ -1, -1, 0x85c0, &nv50_gr_object },
883 		{}
884 	}
885 };
886 
887 static const struct nv50_gr_func
888 mcp89_gr = {
889 	.sclass = {
890 		{ -1, -1, 0x0030, &nv50_gr_object },
891 		{ -1, -1, 0x502d, &nv50_gr_object },
892 		{ -1, -1, 0x5039, &nv50_gr_object },
893 		{ -1, -1, 0x50c0, &nv50_gr_object },
894 		{ -1, -1, 0x85c0, &nv50_gr_object },
895 		{ -1, -1, 0x8697, &nv50_gr_object },
896 		{}
897 	}
898 };
899 
900 static const struct nvkm_gr_func
901 nv50_gr_ = {
902 	.chan_new = nv50_gr_chan_new,
903 	.object_get = nv50_gr_object_get,
904 };
905 
906 static int
907 nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
908 	     struct nvkm_oclass *oclass, void *data, u32 size,
909 	     struct nvkm_object **pobject)
910 {
911 	struct nv50_gr *gr;
912 	int ret;
913 
914 	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
915 	*pobject = nv_object(gr);
916 	if (ret)
917 		return ret;
918 
919 	nv_subdev(gr)->unit = 0x00201000;
920 	nv_subdev(gr)->intr = nv50_gr_intr;
921 
922 	gr->base.func = &nv50_gr_;
923 	gr->base.units = nv50_gr_units;
924 
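	/* Pick the class list for this chipset; the 3D and compute classes
	 * differ between generations.
	 */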
925 	switch (nv_device(gr)->chipset) {
926 	case 0x50:
927 		gr->func = &nv50_gr;
928 		break;
929 	case 0x84:
930 	case 0x86:
931 	case 0x92:
932 	case 0x94:
933 	case 0x96:
934 	case 0x98:
935 		gr->func = &g84_gr;
936 		break;
937 	case 0xa0:
938 	case 0xaa:
939 	case 0xac:
940 		gr->func = &gt200_gr;
941 		break;
942 	case 0xa3:
943 	case 0xa5:
944 	case 0xa8:
945 		gr->func = &gt215_gr;
946 		break;
947 	case 0xaf:
948 		gr->func = &mcp89_gr;
949 		break;
950 	}
951 
952 	/* unfortunate hw bug workaround... */
953 	if (nv_device(gr)->chipset != 0x50 &&
954 	    nv_device(gr)->chipset != 0xac)
955 		nv_engine(gr)->tlb_flush = g84_gr_tlb_flush;
956 
957 	spin_lock_init(&gr->lock);
958 	return 0;
959 }
960 
961 static int
962 nv50_gr_init(struct nvkm_object *object)
963 {
964 	struct nv50_gr *gr = (void *)object;
965 	struct nvkm_device *device = gr->base.engine.subdev.device;
966 	int ret, units, i;
967 
968 	ret = nvkm_gr_init(&gr->base);
969 	if (ret)
970 		return ret;
971 
972 	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
973 	nvkm_wr32(device, 0x40008c, 0x00000004);
974 
975 	/* reset/enable traps and interrupts */
976 	nvkm_wr32(device, 0x400804, 0xc0000000);
977 	nvkm_wr32(device, 0x406800, 0xc0000000);
978 	nvkm_wr32(device, 0x400c04, 0xc0000000);
979 	nvkm_wr32(device, 0x401800, 0xc0000000);
980 	nvkm_wr32(device, 0x405018, 0xc0000000);
981 	nvkm_wr32(device, 0x402000, 0xc0000000);
982 
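	/* Enable trap reporting on each active TP; the register strides
	 * match those used by the trap handlers (4KiB before NVA0, 2KiB
	 * from NVA0 on).
	 */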
983 	units = nvkm_rd32(device, 0x001540);
984 	for (i = 0; i < 16; i++) {
985 		if (!(units & (1 << i)))
986 			continue;
987 
988 		if (nv_device(gr)->chipset < 0xa0) {
989 			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
990 			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
991 			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
992 		} else {
993 			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
994 			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
995 			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
996 		}
997 	}
998 
999 	nvkm_wr32(device, 0x400108, 0xffffffff);
1000 	nvkm_wr32(device, 0x400138, 0xffffffff);
1001 	nvkm_wr32(device, 0x400100, 0xffffffff);
1002 	nvkm_wr32(device, 0x40013c, 0xffffffff);
1003 	nvkm_wr32(device, 0x400500, 0x00010001);
1004 
1005 	/* upload context program, initialise ctxctl defaults */
1006 	ret = nv50_grctx_init(nv_device(gr), &gr->size);
1007 	if (ret)
1008 		return ret;
1009 
1010 	nvkm_wr32(device, 0x400824, 0x00000000);
1011 	nvkm_wr32(device, 0x400828, 0x00000000);
1012 	nvkm_wr32(device, 0x40082c, 0x00000000);
1013 	nvkm_wr32(device, 0x400830, 0x00000000);
1014 	nvkm_wr32(device, 0x40032c, 0x00000000);
1015 	nvkm_wr32(device, 0x400330, 0x00000000);
1016 
1017 	/* some unknown zcull magic */
1018 	switch (nv_device(gr)->chipset & 0xf0) {
1019 	case 0x50:
1020 	case 0x80:
1021 	case 0x90:
1022 		nvkm_wr32(device, 0x402ca8, 0x00000800);
1023 		break;
1024 	case 0xa0:
1025 	default:
1026 		if (nv_device(gr)->chipset == 0xa0 ||
1027 		    nv_device(gr)->chipset == 0xaa ||
1028 		    nv_device(gr)->chipset == 0xac) {
1029 			nvkm_wr32(device, 0x402ca8, 0x00000802);
1030 		} else {
1031 			nvkm_wr32(device, 0x402cc0, 0x00000000);
1032 			nvkm_wr32(device, 0x402ca8, 0x00000002);
1033 		}
1034 
1035 		break;
1036 	}
1037 
1038 	/* zero out zcull regions */
1039 	for (i = 0; i < 8; i++) {
1040 		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
1041 		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
1042 		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
1043 		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
1044 	}
1045 	return 0;
1046 }
1047 
1048 struct nvkm_oclass
1049 nv50_gr_oclass = {
1050 	.handle = NV_ENGINE(GR, 0x50),
1051 	.ofuncs = &(struct nvkm_ofuncs) {
1052 		.ctor = nv50_gr_ctor,
1053 		.dtor = _nvkm_gr_dtor,
1054 		.init = nv50_gr_init,
1055 		.fini = _nvkm_gr_fini,
1056 	},
1057 };
1058