/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/client.h>
#include <core/handle.h>
#include <engine/fifo.h>
#include <subdev/timer.h>

struct nv50_gr {
	struct nvkm_gr base;
	spinlock_t lock;
	u32 size;
};

struct nv50_gr_chan {
	struct nvkm_gr_chan base;
};

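/* 0x001540 appears to hold the enabled-unit mask (TP enables in the low
 * bits, per-TP MP enables around bit 24 and up, judging by how the trap
 * handlers below use it); it is reported to callers unmodified as the
 * "units" value.
 */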
static u64
nv50_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

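/* Graphics objects here are plain 16-byte gpuobjs: the class id goes in the
 * first word and the remaining words are zeroed.  Every class in the sclass
 * lists below shares this constructor.
 */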
static int
nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nvkm_kmap(obj);
	nvkm_wo32(obj, 0x00, nv_mclass(obj));
	nvkm_wo32(obj, 0x04, 0x00000000);
	nvkm_wo32(obj, 0x08, 0x00000000);
	nvkm_wo32(obj, 0x0c, 0x00000000);
	nvkm_done(obj);
	return 0;
}

static struct nvkm_ofuncs
nv50_gr_ofuncs = {
	.ctor = nv50_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_oclass
nv50_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x5097, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
g84_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8297, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt200_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8397, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt215_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8597, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
mcp89_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{ 0x8697, &nv50_gr_ofuncs },
	{}
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

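/* The channel context is a zero-filled gpuobj of gr->size bytes (the size is
 * computed by nv50_grctx_init() at engine init time); nv50_grctx_fill() then
 * writes the chipset-specific initial values the context program expects.
 */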
static int
nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nv50_gr *gr = (void *)engine;
	struct nv50_gr_chan *chan;
	int ret;

	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv50_grctx_fill(nv_device(gr), nv_gpuobj(chan));
	return 0;
}

static struct nvkm_oclass
nv50_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_context_ctor,
		.dtor = _nvkm_gr_context_dtor,
		.init = _nvkm_gr_context_init,
		.fini = _nvkm_gr_context_fini,
		.rd32 = _nvkm_gr_context_rd32,
		.wr32 = _nvkm_gr_context_wr32,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

static const struct nvkm_bitfield nv50_gr_status[] = {
	{ 0x00000001, "BUSY" }, /* set when any bit is set */
	{ 0x00000002, "DISPATCH" },
	{ 0x00000004, "UNK2" },
	{ 0x00000008, "UNK3" },
	{ 0x00000010, "UNK4" },
	{ 0x00000020, "UNK5" },
	{ 0x00000040, "M2MF" },
	{ 0x00000080, "UNK7" },
	{ 0x00000100, "CTXPROG" },
	{ 0x00000200, "VFETCH" },
	{ 0x00000400, "CCACHE_PREGEOM" },
	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
	{ 0x00001000, "VCLIP" },
	{ 0x00002000, "RATTR_APLANE" },
	{ 0x00004000, "TRAST" },
	{ 0x00008000, "CLIPID" },
	{ 0x00010000, "ZCULL" },
	{ 0x00020000, "ENG2D" },
	{ 0x00040000, "RMASK" },
	{ 0x00080000, "TPC_RAST" },
	{ 0x00100000, "TPC_PROP" },
	{ 0x00200000, "TPC_TEX" },
	{ 0x00400000, "TPC_GEOM" },
	{ 0x00800000, "TPC_MP" },
	{ 0x01000000, "ROP" },
	{}
};

static const struct nvkm_bitfield
nv50_gr_vstatus_0[] = {
	{ 0x01, "VFETCH" },
	{ 0x02, "CCACHE" },
	{ 0x04, "PREGEOM" },
	{ 0x08, "POSTGEOM" },
	{ 0x10, "VATTR" },
	{ 0x20, "STRMOUT" },
	{ 0x40, "VCLIP" },
	{}
};

static const struct nvkm_bitfield
nv50_gr_vstatus_1[] = {
	{ 0x01, "TPC_RAST" },
	{ 0x02, "TPC_PROP" },
	{ 0x04, "TPC_TEX" },
	{ 0x08, "TPC_GEOM" },
	{ 0x10, "TPC_MP" },
	{}
};

static const struct nvkm_bitfield
nv50_gr_vstatus_2[] = {
	{ 0x01, "RATTR" },
	{ 0x02, "APLANE" },
	{ 0x04, "TRAST" },
	{ 0x08, "CLIPID" },
	{ 0x10, "ZCULL" },
	{ 0x20, "ENG2D" },
	{ 0x40, "RMASK" },
	{ 0x80, "ROP" },
	{}
};

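/* The PGRAPH_VSTATUS registers pack a 3-bit state per unit; a field value of
 * 1 appears to mean "busy".  Collapse those fields into a one-bit-per-unit
 * mask so nvkm_snprintbf() can name the busy units.
 */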
static void
nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
		      const struct nvkm_bitfield *units, u32 status)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	u32 stat = status;
	u8  mask = 0x00;
	char msg[64];
	int i;

	for (i = 0; units[i].name && status; i++) {
		if ((status & 7) == 1)
			mask |= (1 << i);
		status >>= 3;
	}

	nvkm_snprintbf(msg, sizeof(msg), units, mask);
	nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
}

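/* TLB flush: with PFIFO access to PGRAPH blocked (0x400500 bit 0 cleared),
 * wait up to 2 seconds for the three PGRAPH_VSTATUS registers to report
 * idle, dump status on timeout, then poke 0x100c80 to (presumably) kick the
 * flush and wait for it to complete before re-enabling the engine.
 */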
static int
g84_gr_tlb_flush(struct nvkm_engine *engine)
{
	struct nv50_gr *gr = (void *)engine;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_timer *tmr = device->timer;
	bool idle, timeout = false;
	unsigned long flags;
	char status[128];
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&gr->lock, flags);
	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);

	start = tmr->read(tmr);
	do {
		idle = true;

		for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle &&
		 !(timeout = tmr->read(tmr) - start > 2000000000));

	if (timeout) {
		nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");

		tmp = nvkm_rd32(device, 0x400700);
		nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
		nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);

		nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
				      nvkm_rd32(device, 0x400380));
		nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
				      nvkm_rd32(device, 0x400384));
		nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
				      nvkm_rd32(device, 0x400388));
	}

	nvkm_wr32(device, 0x100c80, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
			break;
	);
	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&gr->lock, flags);
	return timeout ? -EBUSY : 0;
}

static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};

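/* Decode a single TP's PROP trap: the words following ustatus_addr carry the
 * fault address and qualifiers.  CUDA_FAULT (bit 7) is split further into
 * what look like global read/write faults before the remaining bits are
 * printed via nv50_gr_trap_prop[].
 */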
static void
nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
	char msg[128];

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				 tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	if (ustatus) {
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
				   "Address %02x%08x\n",
			   tp, ustatus, msg, e14, e10);
	}
	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		 tp, e0c, e18, e1c, e20, e24);
}

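/* Walk the (up to four) MPs inside one TP - their presence seems to be
 * advertised in bits 24+ of 0x001540.  For each MP with a non-zero exec
 * error status, optionally print it decoded via nv50_mp_exec_errors[], then
 * acknowledge by writing the status registers back.
 */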
static void
nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	char msg[128];
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (nv_device(gr)->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nvkm_rd32(device, addr + 0x10);
		status = nvkm_rd32(device, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nvkm_rd32(device, addr + 0x20);
			pc = nvkm_rd32(device, addr + 0x24);
			oplow = nvkm_rd32(device, addr + 0x70);
			ophigh = nvkm_rd32(device, addr + 0x74);
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_mp_exec_errors, status);
			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
					   "%08x [%s] at %06x warp %d, "
					   "opcode %08x %08x\n",
				   tpid, i, status, msg, pc & 0xffffff,
				   pc >> 24, oplow, ophigh);
		}
		nvkm_wr32(device, addr + 0x10, mp10);
		nvkm_wr32(device, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

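/* Per-TP trap handling: iterate the TPs flagged in the low 16 bits of
 * 0x001540.  The per-TP register stride differs by chipset (<<12 before
 * NVA0, <<11 from NVA0 on), hence the two base addresses.  'type' selects
 * the decoder (6=texture, 7=MP, 8=PROP); each trap is acked by writing
 * 0xc0000000 back to its ustatus register.
 */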
static void
nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	int tps = 0;
	int i, r;
	char msg[128];
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (nv_device(gr)->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nvkm_error(subdev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nvkm_error(subdev, "\t%08x: %08x\n", r,
						   nvkm_rd32(device, r));
				if (ustatus) {
					nvkm_snprintbf(msg, sizeof(msg),
						       nv50_tex_traps, ustatus);
					nvkm_error(subdev,
						   "%s - TP%d: %08x [%s]\n",
						   name, i, ustatus, msg);
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_gr_mp_trap(gr, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nvkm_snprintbf(msg, sizeof(msg),
					       nv50_mpc_traps, ustatus);
				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
					   name, i, ustatus, msg);
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_gr_prop_trap(
						gr, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		if (ustatus) {
			if (display)
				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
		}
		nvkm_wr32(device, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}

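/* Top-level trap handler: 0x400108 reports which unit(s) trapped.  Each
 * handled unit is acked both in its own ustatus register and in 0x400108.
 * Returns 0 only when everything was dealt with on the DISPATCH path, so the
 * caller can drop the TRAP bit from its summary message; otherwise returns 1.
 */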
static int
nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
		     int chid, u64 inst, struct nvkm_object *engctx)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 status = nvkm_rd32(device, 0x400108);
	u32 ustatus;
	char msg[128];

	if (!status && display) {
		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
		}

		nvkm_wr32(device, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nvkm_rd32(device, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nvkm_rd32(device, 0x40080c);
			u32 datah = nvkm_rd32(device, 0x400810);
			u32 class = nvkm_rd32(device, 0x400814);
			u32 r848 = nvkm_rd32(device, 0x400848);

			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x%08x "
					   "400808 %08x 400848 %08x\n",
					   chid, inst,
					   nvkm_client_name(engctx),
					   subc, class, mthd,
					   datah, datal, addr, r848);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x400808, 0);
			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
			nvkm_wr32(device, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nvkm_rd32(device, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nvkm_rd32(device, 0x40085c);
			u32 class = nvkm_rd32(device, 0x400814);

			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x "
					   "40084c %08x\n", chid, inst,
					   nvkm_client_name(engctx), subc,
					   class, mthd, data, addr);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH "
					   "(unknown %08x)\n", ustatus);
		}

		nvkm_wr32(device, 0x400804, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_m2mf, ustatus);
			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x406804),
				   nvkm_rd32(device, 0x406808),
				   nvkm_rd32(device, 0x40680c),
				   nvkm_rd32(device, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 2);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x406800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_vfetch, ustatus);
			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x400c00),
				   nvkm_rd32(device, 0x400c08),
				   nvkm_rd32(device, 0x400c0c),
				   nvkm_rd32(device, 0x400c10));
		}

		nvkm_wr32(device, 0x400c04, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_strmout, ustatus);
			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x401804),
				   nvkm_rd32(device, 0x401808),
				   nvkm_rd32(device, 0x40180c),
				   nvkm_rd32(device, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 0x80);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x401800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_ccache, ustatus);
			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
					   "%08x %08x %08x\n",
				   nvkm_rd32(device, 0x405000),
				   nvkm_rd32(device, 0x405004),
				   nvkm_rd32(device, 0x405008),
				   nvkm_rd32(device, 0x40500c),
				   nvkm_rd32(device, 0x405010),
				   nvkm_rd32(device, 0x405014),
				   nvkm_rd32(device, 0x40501c));
		}

		nvkm_wr32(device, 0x405018, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
		if (display)
			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nvkm_wr32(device, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nvkm_wr32(device, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
				    "TRAP_PROP");
		nvkm_wr32(device, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
		nvkm_wr32(device, 0x400108, status);
	}

	return 1;
}

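/* PGRAPH interrupt handler: 0x400100 holds the pending interrupt bits and
 * 0x40032c the current channel instance.  ILLEGAL_MTHD is first offered to a
 * software object bound to the class; DATA_ERROR and TRAP are decoded via
 * nv50_data_error_names[] and nv50_gr_trap_handler().  Whatever remains is
 * reported, then the interrupts are acked and fetching re-enabled through
 * 0x400500.
 */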
static void
nv50_gr_intr(struct nvkm_subdev *subdev)
{
	struct nv50_gr *gr = (void *)subdev;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle = NULL;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, 0x400708);
	u32 class = nvkm_rd32(device, 0x400814);
	u32 show = stat, show_bitfield = stat;
	const struct nvkm_enum *en;
	char msg[128];
	int chid;

	engctx = nvkm_engctx_get(engine, inst);
	chid   = fifo->chid(fifo, engctx);

	if (stat & 0x00000010) {
		handle = nvkm_handle_get_class(engctx, class);
		if (handle && !nv_call(handle->object, mthd, data))
			show &= ~0x00000010;
		nvkm_handle_put(handle);
	}

	if (show & 0x00100000) {
		u32 ecode = nvkm_rd32(device, 0x400110);
		en = nvkm_enum_find(nv50_data_error_names, ecode);
		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
			   ecode, en ? en->name : "");
		show_bitfield &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12,
					  engctx))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	nvkm_wr32(device, 0x400100, stat);
	nvkm_wr32(device, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   stat, msg, chid, (u64)inst << 12,
			   nvkm_client_name(engctx), subc, class, mthd, data);
	}

	if (nvkm_rd32(device, 0x400824) & (1 << 31))
		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));

	nvkm_engctx_put(engctx);
}

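/* Engine constructor: pick the object class list for the detected chipset
 * and, except on NV50 and NVAC (see the workaround note below), hook up the
 * G84-style TLB flush.
 */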
static int
nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv50_gr *gr;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
	*pobject = nv_object(gr);
	if (ret)
		return ret;

	nv_subdev(gr)->unit = 0x00201000;
	nv_subdev(gr)->intr = nv50_gr_intr;
	nv_engine(gr)->cclass = &nv50_gr_cclass;

	gr->base.units = nv50_gr_units;

	switch (nv_device(gr)->chipset) {
	case 0x50:
		nv_engine(gr)->sclass = nv50_gr_sclass;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
		nv_engine(gr)->sclass = g84_gr_sclass;
		break;
	case 0xa0:
	case 0xaa:
	case 0xac:
		nv_engine(gr)->sclass = gt200_gr_sclass;
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		nv_engine(gr)->sclass = gt215_gr_sclass;
		break;
	case 0xaf:
		nv_engine(gr)->sclass = mcp89_gr_sclass;
		break;
	}

	/* unfortunate hw bug workaround... */
	if (nv_device(gr)->chipset != 0x50 &&
	    nv_device(gr)->chipset != 0xac)
		nv_engine(gr)->tlb_flush = g84_gr_tlb_flush;

	spin_lock_init(&gr->lock);
	return 0;
}

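/* Engine init: enable hardware context switching, arm trap reporting in
 * every unit (the per-TP registers use the chipset-dependent <<12 / <<11
 * stride again), clear any stale interrupts, generate and upload the context
 * program via nv50_grctx_init(), then clear the current-context pointers and
 * the zcull region setup.
 */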
static int
nv50_gr_init(struct nvkm_object *object)
{
	struct nv50_gr *gr = (void *)object;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, units, i;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nvkm_wr32(device, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nvkm_wr32(device, 0x400804, 0xc0000000);
	nvkm_wr32(device, 0x406800, 0xc0000000);
	nvkm_wr32(device, 0x400c04, 0xc0000000);
	nvkm_wr32(device, 0x401800, 0xc0000000);
	nvkm_wr32(device, 0x405018, 0xc0000000);
	nvkm_wr32(device, 0x402000, 0xc0000000);

	units = nvkm_rd32(device, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (nv_device(gr)->chipset < 0xa0) {
			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(nv_device(gr), &gr->size);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x400824, 0x00000000);
	nvkm_wr32(device, 0x400828, 0x00000000);
	nvkm_wr32(device, 0x40082c, 0x00000000);
	nvkm_wr32(device, 0x400830, 0x00000000);
	nvkm_wr32(device, 0x40032c, 0x00000000);
	nvkm_wr32(device, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (nv_device(gr)->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nvkm_wr32(device, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (nv_device(gr)->chipset == 0xa0 ||
		    nv_device(gr)->chipset == 0xaa ||
		    nv_device(gr)->chipset == 0xac) {
			nvkm_wr32(device, 0x402ca8, 0x00000802);
		} else {
			nvkm_wr32(device, 0x402cc0, 0x00000000);
			nvkm_wr32(device, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
	}
	return 0;
}

struct nvkm_oclass
nv50_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv50_gr_init,
		.fini = _nvkm_gr_fini,
	},
};