/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/client.h>
#include <core/handle.h>
#include <engine/fifo.h>
#include <subdev/timer.h>

struct nv50_gr {
	struct nvkm_gr base;
	spinlock_t lock;
	u32 size;
};

struct nv50_gr_chan {
	struct nvkm_gr_chan base;
};

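/* 0x001540 is the unit enable mask; the trap handlers below use bits 0-15
 * as a TP mask and bits 24-27 as an MP mask.  Expose it through the common
 * units() hook.
 */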
static u64
nv50_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

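/* Graphics objects are plain 16-byte gpuobjs: the class id is written to
 * the first word and the remaining words are zeroed.
 */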
static int
nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}

static struct nvkm_ofuncs
nv50_gr_ofuncs = {
	.ctor = nv50_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_oclass
nv50_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x5097, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
g84_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8297, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt200_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8397, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt215_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8597, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
mcp89_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{ 0x8697, &nv50_gr_ofuncs },
	{}
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

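/* Channel context: allocate a zero-filled buffer of gr->size bytes (sized
 * by nv50_grctx_init() at engine init) and let nv50_grctx_fill() write the
 * default context image into it.
 */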
static int
nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nv50_gr *gr = (void *)engine;
	struct nv50_gr_chan *chan;
	int ret;

	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv50_grctx_fill(nv_device(gr), nv_gpuobj(chan));
	return 0;
}

static struct nvkm_oclass
nv50_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_context_ctor,
		.dtor = _nvkm_gr_context_dtor,
		.init = _nvkm_gr_context_init,
		.fini = _nvkm_gr_context_fini,
		.rd32 = _nvkm_gr_context_rd32,
		.wr32 = _nvkm_gr_context_wr32,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

static const struct nvkm_bitfield nv50_gr_status[] = {
	{ 0x00000001, "BUSY" }, /* set when any bit is set */
	{ 0x00000002, "DISPATCH" },
	{ 0x00000004, "UNK2" },
	{ 0x00000008, "UNK3" },
	{ 0x00000010, "UNK4" },
	{ 0x00000020, "UNK5" },
	{ 0x00000040, "M2MF" },
	{ 0x00000080, "UNK7" },
	{ 0x00000100, "CTXPROG" },
	{ 0x00000200, "VFETCH" },
	{ 0x00000400, "CCACHE_PREGEOM" },
	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
	{ 0x00001000, "VCLIP" },
	{ 0x00002000, "RATTR_APLANE" },
	{ 0x00004000, "TRAST" },
	{ 0x00008000, "CLIPID" },
	{ 0x00010000, "ZCULL" },
	{ 0x00020000, "ENG2D" },
	{ 0x00040000, "RMASK" },
	{ 0x00080000, "TPC_RAST" },
	{ 0x00100000, "TPC_PROP" },
	{ 0x00200000, "TPC_TEX" },
	{ 0x00400000, "TPC_GEOM" },
	{ 0x00800000, "TPC_MP" },
	{ 0x01000000, "ROP" },
	{}
};

static const struct nvkm_bitfield
nv50_gr_vstatus_0[] = {
	{ 0x01, "VFETCH" },
	{ 0x02, "CCACHE" },
	{ 0x04, "PREGEOM" },
	{ 0x08, "POSTGEOM" },
	{ 0x10, "VATTR" },
	{ 0x20, "STRMOUT" },
	{ 0x40, "VCLIP" },
	{}
};

static const struct nvkm_bitfield
nv50_gr_vstatus_1[] = {
	{ 0x01, "TPC_RAST" },
	{ 0x02, "TPC_PROP" },
	{ 0x04, "TPC_TEX" },
	{ 0x08, "TPC_GEOM" },
	{ 0x10, "TPC_MP" },
	{}
};

static const struct nvkm_bitfield
nv50_gr_vstatus_2[] = {
	{ 0x01, "RATTR" },
	{ 0x02, "APLANE" },
	{ 0x04, "TRAST" },
	{ 0x08, "CLIPID" },
	{ 0x10, "ZCULL" },
	{ 0x20, "ENG2D" },
	{ 0x40, "RMASK" },
	{ 0x80, "ROP" },
	{}
};

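/* Each unit in a PGRAPH_VSTATUS register appears to occupy a 3-bit field,
 * with a field value of 1 meaning "busy"; collapse that into a
 * one-bit-per-unit mask so nvkm_snprintbf() can print the unit names.
 */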
static void
nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
		      const struct nvkm_bitfield *units, u32 status)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	u32 stat = status;
	u8  mask = 0x00;
	char msg[64];
	int i;

	for (i = 0; units[i].name && status; i++) {
		if ((status & 7) == 1)
			mask |= (1 << i);
		status >>= 3;
	}

	nvkm_snprintbf(msg, sizeof(msg), units, mask);
	nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
}

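/* G84+ TLB flush: mask off 0x400500 bit 0, wait (up to ~2 seconds) for the
 * VSTATUS registers to report idle, trigger the flush through 0x100c80,
 * then re-enable.  NV50/NVAC don't use this path (see nv50_gr_ctor()).
 */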
static int
g84_gr_tlb_flush(struct nvkm_engine *engine)
{
	struct nv50_gr *gr = (void *)engine;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_timer *tmr = device->timer;
	bool idle, timeout = false;
	unsigned long flags;
	char status[128];
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&gr->lock, flags);
	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);

	start = tmr->read(tmr);
	do {
		idle = true;

		for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle &&
		 !(timeout = tmr->read(tmr) - start > 2000000000));

	if (timeout) {
		nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");

		tmp = nvkm_rd32(device, 0x400700);
		nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
		nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);

		nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
				      nvkm_rd32(device, 0x400380));
		nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
				      nvkm_rd32(device, 0x400384));
		nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
				      nvkm_rd32(device, 0x400388));
	}

	nvkm_wr32(device, 0x100c80, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
			break;
	);
	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&gr->lock, flags);
	return timeout ? -EBUSY : 0;
}

static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};

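/* Decode a PROP trap (TP-initiated uncached memory access, see the PROP
 * comment in nv50_gr_trap_handler()) for a single TP.  The CUDA fault
 * decoding below is partly guesswork; the remaining registers are dumped
 * as-is to aid debugging.
 */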
static void
nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
	char msg[128];

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
				   tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				   tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				   tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	if (ustatus) {
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
				   "Address %02x%08x\n",
			   tp, ustatus, msg, e14, e10);
	}
	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		   tp, e0c, e18, e1c, e20, e24);
}

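/* Report and clear execution errors for each MP of the given TP.  The
 * register layout differs between pre-NVA0 (TP stride 1 << 12) and NVA0+
 * (TP stride 1 << 11) chipsets.
 */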
static void
nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	char msg[128];
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (nv_device(gr)->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nvkm_rd32(device, addr + 0x10);
		status = nvkm_rd32(device, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nvkm_rd32(device, addr + 0x20);
			pc = nvkm_rd32(device, addr + 0x24);
			oplow = nvkm_rd32(device, addr + 0x70);
			ophigh = nvkm_rd32(device, addr + 0x74);
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_mp_exec_errors, status);
			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
					   "%08x [%s] at %06x warp %d, "
					   "opcode %08x %08x\n",
				   tpid, i, status, msg, pc & 0xffffff,
				   pc >> 24, oplow, ophigh);
		}
		nvkm_wr32(device, addr + 0x10, mp10);
		nvkm_wr32(device, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

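/* Walk every TP enabled in 0x001540 and handle a trap of the given type,
 * then acknowledge the per-TP ustatus register.  ustatus_old/ustatus_new
 * are the register bases for the pre-NVA0 and NVA0+ layouts respectively.
 */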
static void
nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	int tps = 0;
	int i, r;
	char msg[128];
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (nv_device(gr)->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nvkm_error(subdev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nvkm_error(subdev, "\t%08x: %08x\n", r,
						   nvkm_rd32(device, r));
				if (ustatus) {
					nvkm_snprintbf(msg, sizeof(msg),
						       nv50_tex_traps, ustatus);
					nvkm_error(subdev,
						   "%s - TP%d: %08x [%s]\n",
						   name, i, ustatus, msg);
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_gr_mp_trap(gr, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nvkm_snprintbf(msg, sizeof(msg),
					       nv50_mpc_traps, ustatus);
				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
					   name, i, ustatus, msg);
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_gr_prop_trap(
						gr, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		if (ustatus) {
			if (display)
				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
		}
		nvkm_wr32(device, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}

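/* Decode PGRAPH_TRAP (0x400108) unit by unit, acknowledging each source as
 * it is handled.  The return value tells nv50_gr_intr() whether the TRAP
 * bit still needs to be reported (non-zero) or can be suppressed (zero).
 */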
static int
nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
		     int chid, u64 inst, struct nvkm_object *engctx)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 status = nvkm_rd32(device, 0x400108);
	u32 ustatus;
	char msg[128];

	if (!status && display) {
		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
		}

		nvkm_wr32(device, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nvkm_rd32(device, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nvkm_rd32(device, 0x40080c);
			u32 datah = nvkm_rd32(device, 0x400810);
			u32 class = nvkm_rd32(device, 0x400814);
			u32 r848 = nvkm_rd32(device, 0x400848);

			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x%08x "
					   "400808 %08x 400848 %08x\n",
					   chid, inst,
					   nvkm_client_name(engctx),
					   subc, class, mthd,
					   datah, datal, addr, r848);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x400808, 0);
			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
			nvkm_wr32(device, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nvkm_rd32(device, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nvkm_rd32(device, 0x40085c);
			u32 class = nvkm_rd32(device, 0x400814);

			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x "
					   "40084c %08x\n", chid, inst,
					   nvkm_client_name(engctx), subc,
					   class, mthd, data, addr);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH "
					   "(unknown %08x)\n", ustatus);
		}

		nvkm_wr32(device, 0x400804, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_m2mf, ustatus);
			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x406804),
				   nvkm_rd32(device, 0x406808),
				   nvkm_rd32(device, 0x40680c),
				   nvkm_rd32(device, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 2);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x406800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_vfetch, ustatus);
			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x400c00),
				   nvkm_rd32(device, 0x400c08),
				   nvkm_rd32(device, 0x400c0c),
				   nvkm_rd32(device, 0x400c10));
		}

		nvkm_wr32(device, 0x400c04, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_strmout, ustatus);
			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x401804),
				   nvkm_rd32(device, 0x401808),
				   nvkm_rd32(device, 0x40180c),
				   nvkm_rd32(device, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 0x80);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x401800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_ccache, ustatus);
			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
					   "%08x %08x %08x\n",
				   nvkm_rd32(device, 0x405000),
				   nvkm_rd32(device, 0x405004),
				   nvkm_rd32(device, 0x405008),
				   nvkm_rd32(device, 0x40500c),
				   nvkm_rd32(device, 0x405010),
				   nvkm_rd32(device, 0x405014),
				   nvkm_rd32(device, 0x40501c));
		}

		nvkm_wr32(device, 0x405018, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
		if (display)
			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nvkm_wr32(device, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nvkm_wr32(device, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
				    "TRAP_PROP");
		nvkm_wr32(device, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
		nvkm_wr32(device, 0x400108, status);
	}

	return 1;
}

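/* Top-level PGRAPH interrupt handler: identify the active channel from
 * 0x40032c, give ILLEGAL_MTHD a chance to be handled as a software method,
 * decode DATA_ERROR/TRAP, then acknowledge the interrupt and re-enable
 * PGRAPH (0x400500).
 */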
static void
nv50_gr_intr(struct nvkm_subdev *subdev)
{
	struct nv50_gr *gr = (void *)subdev;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle = NULL;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, 0x400708);
	u32 class = nvkm_rd32(device, 0x400814);
	u32 show = stat, show_bitfield = stat;
	const struct nvkm_enum *en;
	char msg[128];
	int chid;

	engctx = nvkm_engctx_get(engine, inst);
	chid   = fifo->chid(fifo, engctx);

	if (stat & 0x00000010) {
		handle = nvkm_handle_get_class(engctx, class);
		if (handle && !nv_call(handle->object, mthd, data))
			show &= ~0x00000010;
		nvkm_handle_put(handle);
	}

	if (show & 0x00100000) {
		u32 ecode = nvkm_rd32(device, 0x400110);
		en = nvkm_enum_find(nv50_data_error_names, ecode);
		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
			   ecode, en ? en->name : "");
		show_bitfield &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12,
					  engctx))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	nvkm_wr32(device, 0x400100, stat);
	nvkm_wr32(device, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   stat, msg, chid, (u64)inst << 12,
			   nvkm_client_name(engctx), subc, class, mthd, data);
	}

	if (nvkm_rd32(device, 0x400824) & (1 << 31))
		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));

	nvkm_engctx_put(engctx);
}

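/* Engine constructor: pick the object class list for the detected chipset
 * and hook up the TLB flush workaround on everything except NV50 and NVAC.
 */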
static int
nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv50_gr *gr;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
	*pobject = nv_object(gr);
	if (ret)
		return ret;

	nv_subdev(gr)->unit = 0x00201000;
	nv_subdev(gr)->intr = nv50_gr_intr;
	nv_engine(gr)->cclass = &nv50_gr_cclass;

	gr->base.units = nv50_gr_units;

	switch (nv_device(gr)->chipset) {
	case 0x50:
		nv_engine(gr)->sclass = nv50_gr_sclass;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
		nv_engine(gr)->sclass = g84_gr_sclass;
		break;
	case 0xa0:
	case 0xaa:
	case 0xac:
		nv_engine(gr)->sclass = gt200_gr_sclass;
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		nv_engine(gr)->sclass = gt215_gr_sclass;
		break;
	case 0xaf:
		nv_engine(gr)->sclass = mcp89_gr_sclass;
		break;
	}

	/* unfortunate hw bug workaround... */
	if (nv_device(gr)->chipset != 0x50 &&
	    nv_device(gr)->chipset != 0xac)
		nv_engine(gr)->tlb_flush = g84_gr_tlb_flush;

	spin_lock_init(&gr->lock);
	return 0;
}

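/* Engine init: enable hardware context switching, reset/enable trap
 * reporting for each unit (including every TP enabled in 0x001540),
 * build and upload the context program, then clear the zcull setup.
 */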
static int
nv50_gr_init(struct nvkm_object *object)
{
	struct nv50_gr *gr = (void *)object;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, units, i;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nvkm_wr32(device, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nvkm_wr32(device, 0x400804, 0xc0000000);
	nvkm_wr32(device, 0x406800, 0xc0000000);
	nvkm_wr32(device, 0x400c04, 0xc0000000);
	nvkm_wr32(device, 0x401800, 0xc0000000);
	nvkm_wr32(device, 0x405018, 0xc0000000);
	nvkm_wr32(device, 0x402000, 0xc0000000);

	units = nvkm_rd32(device, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (nv_device(gr)->chipset < 0xa0) {
			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(nv_device(gr), &gr->size);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x400824, 0x00000000);
	nvkm_wr32(device, 0x400828, 0x00000000);
	nvkm_wr32(device, 0x40082c, 0x00000000);
	nvkm_wr32(device, 0x400830, 0x00000000);
	nvkm_wr32(device, 0x40032c, 0x00000000);
	nvkm_wr32(device, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (nv_device(gr)->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nvkm_wr32(device, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (nv_device(gr)->chipset == 0xa0 ||
		    nv_device(gr)->chipset == 0xaa ||
		    nv_device(gr)->chipset == 0xac) {
			nvkm_wr32(device, 0x402ca8, 0x00000802);
		} else {
			nvkm_wr32(device, 0x402cc0, 0x00000000);
			nvkm_wr32(device, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
	}
	return 0;
}

struct nvkm_oclass
nv50_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv50_gr_init,
		.fini = _nvkm_gr_fini,
	},
};