/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>

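/* Register 0x001540 is read here and by the trap handlers below; bits 0-15
 * are treated as per-TP enables and bits 24-27 as per-MP enables, so it
 * presumably reflects which graphics units are present/enabled.
 */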
u64
nv50_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

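/* Software object binding: allocate a 16-byte object in instance memory,
 * store the class id in the first word and clear the rest.
 */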
static int
nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
				  align, false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv50_gr_object = {
	.bind = nv50_gr_object_bind,
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

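/* Channel context binding: allocate the context image (gr->size bytes, as
 * determined by nv50_grctx_init() during engine init) and fill it with the
 * default values generated by nv50_grctx_fill().
 */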
static int
nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
				  align, true, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

static const struct nvkm_object_func
nv50_gr_chan = {
	.bind = nv50_gr_chan_bind,
};

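/* Channel context constructor; the backing context image is only allocated
 * when the object is bound (nv50_gr_chan_bind() above).
 */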
int
nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nv50_gr_chan *chan;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	*pobject = &chan->object;
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

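/* Bitfield/enum tables used with nvkm_snprintbf()/nvkm_enum_find() below to
 * turn raw trap and interrupt status values into readable strings.
 */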
static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};

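/* Decode and log a PROP trap for a single TP.  The registers at
 * ustatus_addr + 0x04..0x1c appear to hold the fault address and related
 * state; CUDA_FAULT is decoded specially as a global read/write fault,
 * anything else is printed through the nv50_gr_trap_prop table.
 */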
static void
nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
	char msg[128];

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				 tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	if (ustatus) {
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
				   "Address %02x%08x\n",
			   tp, ustatus, msg, e14, e10);
	}
	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		 tp, e0c, e18, e1c, e20, e24);
}

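/* Report MP execution errors within one TP.  Each MP enabled in bits 24+
 * of register 0x1540 has its own block of trap registers (the base differs
 * between pre-NVA0 and NVA0+ layouts); the status is decoded with
 * nv50_mp_exec_errors and then cleared.
 */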
static void
nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	char msg[128];
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (device->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nvkm_rd32(device, addr + 0x10);
		status = nvkm_rd32(device, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nvkm_rd32(device, addr + 0x20);
			pc = nvkm_rd32(device, addr + 0x24);
			oplow = nvkm_rd32(device, addr + 0x70);
			ophigh = nvkm_rd32(device, addr + 0x74);
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_mp_exec_errors, status);
			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
					   "%08x [%s] at %06x warp %d, "
					   "opcode %08x %08x\n",
				   tpid, i, status, msg, pc & 0xffffff,
				   pc >> 24, oplow, ophigh);
		}
		nvkm_wr32(device, addr + 0x10, mp10);
		nvkm_wr32(device, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

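/* Generic per-TP trap handler: walk every TP enabled in register 0x1540,
 * read its ustatus register (old vs. new layout depending on chipset),
 * dispatch to the TEXTURE/MP/PROP decoders based on 'type', and ack the
 * trap by writing 0xc0000000 back.
 */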
static void
nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	int tps = 0;
	int i, r;
	char msg[128];
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (device->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nvkm_error(subdev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nvkm_error(subdev, "\t%08x: %08x\n", r,
						   nvkm_rd32(device, r));
				if (ustatus) {
					nvkm_snprintbf(msg, sizeof(msg),
						       nv50_tex_traps, ustatus);
					nvkm_error(subdev,
						   "%s - TP%d: %08x [%s]\n",
						   name, i, ustatus, msg);
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_gr_mp_trap(gr, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nvkm_snprintbf(msg, sizeof(msg),
					       nv50_mpc_traps, ustatus);
				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
					   name, i, ustatus, msg);
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_gr_prop_trap(
						gr, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		if (ustatus) {
			if (display)
				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
		}
		nvkm_wr32(device, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}

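/* Top-level trap handler: read the per-unit trap status from 0x400108 and
 * deal with each reporting unit (DISPATCH, M2MF, VFETCH, STRMOUT, CCACHE,
 * TEXTURE, MP, PROP), acking bits as they are handled.  A zero return tells
 * nv50_gr_intr() to drop the TRAP bit from its own report.
 */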
static int
nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
		     int chid, u64 inst, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 status = nvkm_rd32(device, 0x400108);
	u32 ustatus;
	char msg[128];

	if (!status && display) {
		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
		}

		nvkm_wr32(device, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nvkm_rd32(device, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nvkm_rd32(device, 0x40080c);
			u32 datah = nvkm_rd32(device, 0x400810);
			u32 class = nvkm_rd32(device, 0x400814);
			u32 r848 = nvkm_rd32(device, 0x400848);

			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x%08x "
					   "400808 %08x 400848 %08x\n",
					   chid, inst, name, subc, class, mthd,
					   datah, datal, addr, r848);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x400808, 0);
			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
			nvkm_wr32(device, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nvkm_rd32(device, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nvkm_rd32(device, 0x40085c);
			u32 class = nvkm_rd32(device, 0x400814);

			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x "
					   "40084c %08x\n", chid, inst, name,
					   subc, class, mthd, data, addr);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH "
					   "(unknown %08x)\n", ustatus);
		}

		nvkm_wr32(device, 0x400804, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_m2mf, ustatus);
			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x406804),
				   nvkm_rd32(device, 0x406808),
				   nvkm_rd32(device, 0x40680c),
				   nvkm_rd32(device, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 2);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x406800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_vfetch, ustatus);
			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x400c00),
				   nvkm_rd32(device, 0x400c08),
				   nvkm_rd32(device, 0x400c0c),
				   nvkm_rd32(device, 0x400c10));
		}

		nvkm_wr32(device, 0x400c04, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_strmout, ustatus);
			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x401804),
				   nvkm_rd32(device, 0x401808),
				   nvkm_rd32(device, 0x40180c),
				   nvkm_rd32(device, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 0x80);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x401800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_ccache, ustatus);
			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
					   "%08x %08x %08x\n",
				   nvkm_rd32(device, 0x405000),
				   nvkm_rd32(device, 0x405004),
				   nvkm_rd32(device, 0x405008),
				   nvkm_rd32(device, 0x40500c),
				   nvkm_rd32(device, 0x405010),
				   nvkm_rd32(device, 0x405014),
				   nvkm_rd32(device, 0x40501c));
		}

		nvkm_wr32(device, 0x405018, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
		if (display)
			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nvkm_wr32(device, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nvkm_wr32(device, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
				    "TRAP_PROP");
		nvkm_wr32(device, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
		nvkm_wr32(device, 0x400108, status);
	}

	return 1;
}

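/* PGRAPH interrupt handler: identify the faulting channel from the current
 * context instance (0x40032c), decode DATA_ERROR and TRAP conditions using
 * the tables and handlers above, ack the interrupt in 0x400100 and write
 * 0x400500 (presumably re-enabling PGRAPH's FIFO access).
 */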
void
nv50_gr_intr(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, 0x400708);
	u32 class = nvkm_rd32(device, 0x400814);
	u32 show = stat, show_bitfield = stat;
	const struct nvkm_enum *en;
	unsigned long flags;
	const char *name = "unknown";
	char msg[128];
	int chid = -1;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
	if (chan) {
		name = chan->object.client->name;
		chid = chan->chid;
	}

	if (show & 0x00100000) {
		u32 ecode = nvkm_rd32(device, 0x400110);
		en = nvkm_enum_find(nv50_data_error_names, ecode);
		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
			   ecode, en ? en->name : "");
		show_bitfield &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	nvkm_wr32(device, 0x400100, stat);
	nvkm_wr32(device, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   stat, msg, chid, (u64)inst << 12, name,
			   subc, class, mthd, data);
	}

	if (nvkm_rd32(device, 0x400824) & (1 << 31))
		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

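/* Engine init: enable hardware context switching, reset/enable trap and
 * interrupt reporting for every unit, build the context program via
 * nv50_grctx_init(), and clear the zcull setup.
 */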
int
nv50_gr_init(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, units, i;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nvkm_wr32(device, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nvkm_wr32(device, 0x400804, 0xc0000000);
	nvkm_wr32(device, 0x406800, 0xc0000000);
	nvkm_wr32(device, 0x400c04, 0xc0000000);
	nvkm_wr32(device, 0x401800, 0xc0000000);
	nvkm_wr32(device, 0x405018, 0xc0000000);
	nvkm_wr32(device, 0x402000, 0xc0000000);

	units = nvkm_rd32(device, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (device->chipset < 0xa0) {
			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x400824, 0x00000000);
	nvkm_wr32(device, 0x400828, 0x00000000);
	nvkm_wr32(device, 0x40082c, 0x00000000);
	nvkm_wr32(device, 0x400830, 0x00000000);
	nvkm_wr32(device, 0x40032c, 0x00000000);
	nvkm_wr32(device, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (device->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nvkm_wr32(device, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (device->chipset == 0xa0 ||
		    device->chipset == 0xaa ||
		    device->chipset == 0xac) {
			nvkm_wr32(device, 0x402ca8, 0x00000802);
		} else {
			nvkm_wr32(device, 0x402cc0, 0x00000000);
			nvkm_wr32(device, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
	}

	return 0;
}

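/* Common constructor; the nvkm_gr_func parameter suggests it is shared by
 * the other NV50-family GR implementations.  Allocates the nv50_gr wrapper
 * and constructs the base engine.
 */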
int
nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     int index, struct nvkm_gr **pgr)
{
	struct nv50_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	spin_lock_init(&gr->lock);
	*pgr = &gr->base;

	return nvkm_gr_ctor(func, device, index, true, &gr->base);
}

static const struct nvkm_gr_func
nv50_gr = {
	.init = nv50_gr_init,
	.intr = nv50_gr_intr,
	.chan_new = nv50_gr_chan_new,
	.units = nv50_gr_units,
	.sclass = {
		{ -1, -1, 0x0030, &nv50_gr_object },
		{ -1, -1, 0x502d, &nv50_gr_object },
		{ -1, -1, 0x5039, &nv50_gr_object },
		{ -1, -1, 0x5097, &nv50_gr_object },
		{ -1, -1, 0x50c0, &nv50_gr_object },
		{}
	}
};

int
nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv50_gr_new_(&nv50_gr, device, index, pgr);
}