1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "nv50.h"
25 
26 #include <core/client.h>
27 #include <core/gpuobj.h>
28 #include <engine/fifo.h>
29 
30 #include <nvif/class.h>
31 
32 u64
nv50_gr_units(struct nvkm_gr * gr)33 nv50_gr_units(struct nvkm_gr *gr)
34 {
35 	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
36 }
37 
38 /*******************************************************************************
39  * Graphics object classes
40  ******************************************************************************/
41 
42 static int
nv50_gr_object_bind(struct nvkm_object * object,struct nvkm_gpuobj * parent,int align,struct nvkm_gpuobj ** pgpuobj)43 nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
44 		    int align, struct nvkm_gpuobj **pgpuobj)
45 {
46 	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
47 				  align, false, parent, pgpuobj);
48 	if (ret == 0) {
49 		nvkm_kmap(*pgpuobj);
50 		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
51 		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
52 		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
53 		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
54 		nvkm_done(*pgpuobj);
55 	}
56 	return ret;
57 }
58 
/* Graphics object class ops: objects only need instance-memory binding. */
const struct nvkm_object_func
nv50_gr_object = {
	.bind = nv50_gr_object_bind,
};
63 
64 /*******************************************************************************
65  * PGRAPH context
66  ******************************************************************************/
67 
68 static int
nv50_gr_chan_bind(struct nvkm_object * object,struct nvkm_gpuobj * parent,int align,struct nvkm_gpuobj ** pgpuobj)69 nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
70 		  int align, struct nvkm_gpuobj **pgpuobj)
71 {
72 	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
73 	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
74 				  align, true, parent, pgpuobj);
75 	if (ret == 0) {
76 		nvkm_kmap(*pgpuobj);
77 		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
78 		nvkm_done(*pgpuobj);
79 	}
80 	return ret;
81 }
82 
/* Per-channel PGRAPH context object ops. */
static const struct nvkm_object_func
nv50_gr_chan = {
	.bind = nv50_gr_chan_bind,
};
87 
88 int
nv50_gr_chan_new(struct nvkm_gr * base,struct nvkm_chan * fifoch,const struct nvkm_oclass * oclass,struct nvkm_object ** pobject)89 nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
90 		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
91 {
92 	struct nv50_gr *gr = nv50_gr(base);
93 	struct nv50_gr_chan *chan;
94 
95 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
96 		return -ENOMEM;
97 	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
98 	chan->gr = gr;
99 	*pobject = &chan->object;
100 	return 0;
101 }
102 
103 /*******************************************************************************
104  * PGRAPH engine/subdev functions
105  ******************************************************************************/
106 
/* MP execution error bits (per-MP status register, decoded in
 * nv50_gr_mp_trap()).
 */
static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

/* MPC (MP controller) trap bits, decoded for TP trap type 7. */
static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

/* Texture unit trap bits, decoded for TP trap type 6. */
static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

/* M2MF (memory-to-memory format) trap status bits. */
static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

/* VFETCH (vertex fetch) trap status bits. */
static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* STRMOUT (stream output) trap status bits. */
static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* CCACHE (code/constant cache) trap status bits. */
static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};
162 
/* DATA_ERROR codes (register 0x400110) reported via the 0x00100000
 * interrupt bit.  There must be a *lot* of these; will take some time
 * to gather them up.
 */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};
209 
/* Top-level PGRAPH interrupt status bits (register 0x400100). */
static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

/* PROP unit trap bits, decoded for TP trap type 8. */
static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};
238 
/* Decode and log a PROP trap for one TP.  ustatus_addr is the TP's
 * trap-status register; the faulting-address detail registers follow
 * it at fixed offsets.  Register meanings below follow the offsets
 * this function reads — exact semantics are reverse-engineered.
 */
static void
nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
	char msg[128];

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				 tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	/* Any remaining bits get a generic bitfield decode. */
	if (ustatus) {
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
				   "Address %02x%08x\n",
			   tp, ustatus, msg, e14, e10);
	}
	/* Always dump the raw detail registers for debugging. */
	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		 tp, e0c, e18, e1c, e20, e24);
}
280 
/* Decode, log (when display is set) and acknowledge MP execution traps
 * for all MPs of one TP.  The per-MP register window moves between
 * pre-NVA0 and NVA0+ chipsets.
 */
static void
nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	char msg[128];
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		/* Bits 24..27 of 0x1540 select which MPs are present. */
		if (!(units & 1 << (i+24)))
			continue;
		if (device->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nvkm_rd32(device, addr + 0x10);
		status = nvkm_rd32(device, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* Dummy read at +0x20 appears required before the
			 * PC/opcode registers latch — TODO confirm.
			 */
			nvkm_rd32(device, addr + 0x20);
			pc = nvkm_rd32(device, addr + 0x24);
			oplow = nvkm_rd32(device, addr + 0x70);
			ophigh = nvkm_rd32(device, addr + 0x74);
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_mp_exec_errors, status);
			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
					   "%08x [%s] at %06x warp %d, "
					   "opcode %08x %08x\n",
				   tpid, i, status, msg, pc & 0xffffff,
				   pc >> 24, oplow, ophigh);
		}
		/* Acknowledge: write back +0x10, clear the status reg. */
		nvkm_wr32(device, addr + 0x10, mp10);
		nvkm_wr32(device, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
323 
/* Walk every present TP and handle traps of one type.  ustatus_old /
 * ustatus_new are the per-TP trap-status base addresses for pre-NVA0
 * and NVA0+ layouts; type selects the decoder (6 = texture, 7 = MP,
 * 8 = PROP).  Each handled TP has its status acknowledged by writing
 * 0xc0000000 back.
 */
static void
nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	int tps = 0;
	int i, r;
	char msg[128];
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		/* Low 16 bits of 0x1540 are the TP presence mask. */
		if (!(units & (1 << i)))
			continue;
		if (device->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nvkm_error(subdev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nvkm_error(subdev, "\t%08x: %08x\n", r,
						   nvkm_rd32(device, r));
				if (ustatus) {
					nvkm_snprintbf(msg, sizeof(msg),
						       nv50_tex_traps, ustatus);
					nvkm_error(subdev,
						   "%s - TP%d: %08x [%s]\n",
						   name, i, ustatus, msg);
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_gr_mp_trap(gr, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nvkm_snprintbf(msg, sizeof(msg),
					       nv50_mpc_traps, ustatus);
				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
					   name, i, ustatus, msg);
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_gr_prop_trap(
						gr, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		/* Anything not consumed by the type-specific decoder. */
		if (ustatus) {
			if (display)
				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
		}
		nvkm_wr32(device, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}
393 
/* Dispatch a PGRAPH TRAP interrupt: read the per-unit trap summary
 * (0x400108), decode and acknowledge each reporting unit in turn.
 * Returns 0 when the trap was fully handled by the DISPATCH path,
 * 1 otherwise (caller keeps the TRAP bit in its "show" mask).
 * display gates log output; chid/inst/name identify the channel for
 * the log messages.
 */
static int
nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
		     int chid, u64 inst, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 status = nvkm_rd32(device, 0x400108);
	u32 ustatus;
	char msg[128];

	if (!status && display) {
		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
		}

		nvkm_wr32(device, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nvkm_rd32(device, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nvkm_rd32(device, 0x40080c);
			u32 datah = nvkm_rd32(device, 0x400810);
			u32 class = nvkm_rd32(device, 0x400814);
			u32 r848 = nvkm_rd32(device, 0x400848);

			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
			/* Bit 31 of 0x400808 flags a valid stuck command. */
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x%08x "
					   "400808 %08x 400848 %08x\n",
					   chid, inst, name, subc, class, mthd,
					   datah, datal, addr, r848);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x400808, 0);
			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
			nvkm_wr32(device, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nvkm_rd32(device, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nvkm_rd32(device, 0x40085c);
			u32 class = nvkm_rd32(device, 0x400814);

			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x "
					   "40084c %08x\n", chid, inst, name,
					   subc, class, mthd, data, addr);
			} else
			if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nvkm_error(subdev, "TRAP_DISPATCH "
					   "(unknown %08x)\n", ustatus);
		}

		/* Ack DISPATCH and clear its summary bit. */
		nvkm_wr32(device, 0x400804, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_m2mf, ustatus);
			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x406804),
				   nvkm_rd32(device, 0x406808),
				   nvkm_rd32(device, 0x40680c),
				   nvkm_rd32(device, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 2);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x406800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_vfetch, ustatus);
			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x400c00),
				   nvkm_rd32(device, 0x400c08),
				   nvkm_rd32(device, 0x400c0c),
				   nvkm_rd32(device, 0x400c10));
		}

		nvkm_wr32(device, 0x400c04, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_strmout, ustatus);
			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x401804),
				   nvkm_rd32(device, 0x401808),
				   nvkm_rd32(device, 0x40180c),
				   nvkm_rd32(device, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 0x80);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x401800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_ccache, ustatus);
			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
					   "%08x %08x %08x\n",
				   nvkm_rd32(device, 0x405000),
				   nvkm_rd32(device, 0x405004),
				   nvkm_rd32(device, 0x405008),
				   nvkm_rd32(device, 0x40500c),
				   nvkm_rd32(device, 0x405010),
				   nvkm_rd32(device, 0x405014),
				   nvkm_rd32(device, 0x40501c));
		}

		nvkm_wr32(device, 0x405018, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
		if (display)
			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nvkm_wr32(device, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nvkm_wr32(device, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
				    "TRAP_PROP");
		nvkm_wr32(device, 0x400108, 0x100);
		status &= ~0x100;
	}

	/* Anything left is unrecognised; ack it anyway so it can't wedge. */
	if (status) {
		if (display)
			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
		nvkm_wr32(device, 0x400108, status);
	}

	return 1;
}
618 
/* PGRAPH interrupt handler: snapshot the interrupt state, look up the
 * owning channel by context instance, decode DATA_ERROR and TRAP
 * specially, then ack everything and log whatever remains visible.
 */
void
nv50_gr_intr(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_chan *chan;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, 0x400708);
	u32 class = nvkm_rd32(device, 0x400814);
	u32 show = stat, show_bitfield = stat;
	const struct nvkm_enum *en;
	unsigned long flags;
	const char *name = "unknown";
	char msg[128];
	int chid = -1;

	/* inst is in units of 4KiB pages, hence the << 12 below. */
	chan = nvkm_chan_get_inst(&gr->base.engine, (u64)inst << 12, &flags);
	if (chan)  {
		name = chan->name;
		chid = chan->id;
	}

	/* DATA_ERROR: decode the error code from 0x400110. */
	if (show & 0x00100000) {
		u32 ecode = nvkm_rd32(device, 0x400110);
		en = nvkm_enum_find(nv50_data_error_names, ecode);
		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
			   ecode, en ? en->name : "");
		show_bitfield &= ~0x00100000;
	}

	/* TRAP: hand off to the per-unit trap handler. */
	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	/* Ack all interrupts and re-enable PGRAPH fifo access. */
	nvkm_wr32(device, 0x400100, stat);
	nvkm_wr32(device, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   stat, msg, chid, (u64)inst << 12, name,
			   subc, class, mthd, data);
	}

	/* Clear bit 31 of 0x400824 if set — purpose unconfirmed. */
	if (nvkm_rd32(device, 0x400824) & (1 << 31))
		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));

	nvkm_chan_put(&chan, flags);
}
677 
/* One-time PGRAPH hardware initialisation: enable hardware context
 * switching, arm trap/interrupt reporting for every unit and present
 * TP, upload the context program, and program zcull defaults.
 * Returns 0 on success or a negative error from nv50_grctx_init().
 */
int
nv50_gr_init(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, units, i;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nvkm_wr32(device, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nvkm_wr32(device, 0x400804, 0xc0000000);
	nvkm_wr32(device, 0x406800, 0xc0000000);
	nvkm_wr32(device, 0x400c04, 0xc0000000);
	nvkm_wr32(device, 0x401800, 0xc0000000);
	nvkm_wr32(device, 0x405018, 0xc0000000);
	nvkm_wr32(device, 0x402000, 0xc0000000);

	/* Same per-TP trap registers that nv50_gr_tp_trap() handles;
	 * the register window layout differs pre/post NVA0. */
	units = nvkm_rd32(device, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (device->chipset < 0xa0) {
			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	/* Unmask and clear all trap/interrupt status bits. */
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x400824, 0x00000000);
	nvkm_wr32(device, 0x400828, 0x00000000);
	nvkm_wr32(device, 0x40082c, 0x00000000);
	nvkm_wr32(device, 0x400830, 0x00000000);
	nvkm_wr32(device, 0x40032c, 0x00000000);
	nvkm_wr32(device, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (device->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nvkm_wr32(device, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (device->chipset == 0xa0 ||
		    device->chipset == 0xaa ||
		    device->chipset == 0xac) {
			nvkm_wr32(device, 0x402ca8, 0x00000802);
		} else {
			nvkm_wr32(device, 0x402cc0, 0x00000000);
			nvkm_wr32(device, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
	}

	return 0;
}
761 
762 int
nv50_gr_new_(const struct nvkm_gr_func * func,struct nvkm_device * device,enum nvkm_subdev_type type,int inst,struct nvkm_gr ** pgr)763 nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
764 	     enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
765 {
766 	struct nv50_gr *gr;
767 
768 	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
769 		return -ENOMEM;
770 	spin_lock_init(&gr->lock);
771 	*pgr = &gr->base;
772 
773 	return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
774 }
775 
/* Engine function table and supported object classes for NV50 PGRAPH. */
static const struct nvkm_gr_func
nv50_gr = {
	.init = nv50_gr_init,
	.intr = nv50_gr_intr,
	.chan_new = nv50_gr_chan_new,
	.units = nv50_gr_units,
	.sclass = {
		{ -1, -1, NV_NULL_CLASS, &nv50_gr_object },
		{ -1, -1, NV50_TWOD, &nv50_gr_object },
		{ -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
		{ -1, -1, NV50_TESLA, &nv50_gr_object },
		{ -1, -1, NV50_COMPUTE, &nv50_gr_object },
		{}
	}
};
791 
/* Public constructor: instantiate the engine with the NV50 func table. */
int
nv50_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return nv50_gr_new_(&nv50_gr, device, type, inst, pgr);
}
797