/*
 * Copyright 2007 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

static u32
nv04_gr_ctx_regs[] = {
	0x0040053c,
	0x00400544,
	0x00400540,
	0x00400548,
	NV04_PGRAPH_CTX_SWITCH1,
	NV04_PGRAPH_CTX_SWITCH2,
	NV04_PGRAPH_CTX_SWITCH3,
	NV04_PGRAPH_CTX_SWITCH4,
	NV04_PGRAPH_CTX_CACHE1,
	NV04_PGRAPH_CTX_CACHE2,
	NV04_PGRAPH_CTX_CACHE3,
	NV04_PGRAPH_CTX_CACHE4,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	0x00400174,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV04_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV04_PGRAPH_SURFACE,
	NV04_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV04_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM+0x00,
	NV04_PGRAPH_PATT_COLORRAM+0x04,
	NV04_PGRAPH_PATT_COLORRAM+0x08,
	NV04_PGRAPH_PATT_COLORRAM+0x0c,
	NV04_PGRAPH_PATT_COLORRAM+0x10,
	NV04_PGRAPH_PATT_COLORRAM+0x14,
	NV04_PGRAPH_PATT_COLORRAM+0x18,
	NV04_PGRAPH_PATT_COLORRAM+0x1c,
	NV04_PGRAPH_PATT_COLORRAM+0x20,
	NV04_PGRAPH_PATT_COLORRAM+0x24,
	NV04_PGRAPH_PATT_COLORRAM+0x28,
	NV04_PGRAPH_PATT_COLORRAM+0x2c,
	NV04_PGRAPH_PATT_COLORRAM+0x30,
	NV04_PGRAPH_PATT_COLORRAM+0x34,
	NV04_PGRAPH_PATT_COLORRAM+0x38,
	NV04_PGRAPH_PATT_COLORRAM+0x3c,
	NV04_PGRAPH_PATT_COLORRAM+0x40,
	NV04_PGRAPH_PATT_COLORRAM+0x44,
	NV04_PGRAPH_PATT_COLORRAM+0x48,
	NV04_PGRAPH_PATT_COLORRAM+0x4c,
	NV04_PGRAPH_PATT_COLORRAM+0x50,
	NV04_PGRAPH_PATT_COLORRAM+0x54,
	NV04_PGRAPH_PATT_COLORRAM+0x58,
	NV04_PGRAPH_PATT_COLORRAM+0x5c,
	NV04_PGRAPH_PATT_COLORRAM+0x60,
	NV04_PGRAPH_PATT_COLORRAM+0x64,
	NV04_PGRAPH_PATT_COLORRAM+0x68,
	NV04_PGRAPH_PATT_COLORRAM+0x6c,
	NV04_PGRAPH_PATT_COLORRAM+0x70,
	NV04_PGRAPH_PATT_COLORRAM+0x74,
	NV04_PGRAPH_PATT_COLORRAM+0x78,
	NV04_PGRAPH_PATT_COLORRAM+0x7c,
	NV04_PGRAPH_PATT_COLORRAM+0x80,
	NV04_PGRAPH_PATT_COLORRAM+0x84,
	NV04_PGRAPH_PATT_COLORRAM+0x88,
	NV04_PGRAPH_PATT_COLORRAM+0x8c,
	NV04_PGRAPH_PATT_COLORRAM+0x90,
	NV04_PGRAPH_PATT_COLORRAM+0x94,
	NV04_PGRAPH_PATT_COLORRAM+0x98,
	NV04_PGRAPH_PATT_COLORRAM+0x9c,
	NV04_PGRAPH_PATT_COLORRAM+0xa0,
	NV04_PGRAPH_PATT_COLORRAM+0xa4,
	NV04_PGRAPH_PATT_COLORRAM+0xa8,
	NV04_PGRAPH_PATT_COLORRAM+0xac,
	NV04_PGRAPH_PATT_COLORRAM+0xb0,
	NV04_PGRAPH_PATT_COLORRAM+0xb4,
	NV04_PGRAPH_PATT_COLORRAM+0xb8,
	NV04_PGRAPH_PATT_COLORRAM+0xbc,
	NV04_PGRAPH_PATT_COLORRAM+0xc0,
	NV04_PGRAPH_PATT_COLORRAM+0xc4,
	NV04_PGRAPH_PATT_COLORRAM+0xc8,
	NV04_PGRAPH_PATT_COLORRAM+0xcc,
	NV04_PGRAPH_PATT_COLORRAM+0xd0,
	NV04_PGRAPH_PATT_COLORRAM+0xd4,
	NV04_PGRAPH_PATT_COLORRAM+0xd8,
	NV04_PGRAPH_PATT_COLORRAM+0xdc,
	NV04_PGRAPH_PATT_COLORRAM+0xe0,
	NV04_PGRAPH_PATT_COLORRAM+0xe4,
	NV04_PGRAPH_PATT_COLORRAM+0xe8,
	NV04_PGRAPH_PATT_COLORRAM+0xec,
	NV04_PGRAPH_PATT_COLORRAM+0xf0,
	NV04_PGRAPH_PATT_COLORRAM+0xf4,
	NV04_PGRAPH_PATT_COLORRAM+0xf8,
	NV04_PGRAPH_PATT_COLORRAM+0xfc,
	NV04_PGRAPH_PATTERN,
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	0x00400600,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	NV04_PGRAPH_CONTROL0,
	NV04_PGRAPH_CONTROL1,
	NV04_PGRAPH_CONTROL2,
	NV04_PGRAPH_BLEND,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	0x00400560,
	0x00400568,
	0x00400564,
	0x0040056c,
	0x00400400,
	0x00400480,
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	0x00400534,
	0x00400538,
	0x00400514,
	0x00400518,
	0x0040051c,
	0x00400520,
	0x00400524,
	0x00400528,
	0x0040052c,
	0x00400530,
	0x00400d00,
	0x00400d40,
	0x00400d80,
	0x00400d04,
	0x00400d44,
	0x00400d84,
	0x00400d08,
	0x00400d48,
	0x00400d88,
	0x00400d0c,
	0x00400d4c,
	0x00400d8c,
	0x00400d10,
	0x00400d50,
	0x00400d90,
	0x00400d14,
	0x00400d54,
	0x00400d94,
	0x00400d18,
	0x00400d58,
	0x00400d98,
	0x00400d1c,
	0x00400d5c,
	0x00400d9c,
	0x00400d20,
	0x00400d60,
	0x00400da0,
	0x00400d24,
	0x00400d64,
	0x00400da4,
	0x00400d28,
	0x00400d68,
	0x00400da8,
	0x00400d2c,
	0x00400d6c,
	0x00400dac,
	0x00400d30,
	0x00400d70,
	0x00400db0,
	0x00400d34,
	0x00400d74,
	0x00400db4,
	0x00400d38,
	0x00400d78,
	0x00400db8,
	0x00400d3c,
	0x00400d7c,
	0x00400dbc,
	0x00400590,
	0x00400594,
	0x00400598,
	0x0040059c,
	0x004005a8,
	0x004005ac,
	0x004005b0,
	0x004005b4,
	0x004005c0,
	0x004005c4,
	0x004005c8,
	0x004005cc,
	0x004005d0,
	0x004005d4,
	0x004005d8,
	0x004005dc,
	0x004005e0,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV04_PGRAPH_DVD_COLORFMT,
	NV04_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	0x00400500,
	0x00400504,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
	NV04_PGRAPH_DEBUG_3
};

#define nv04_gr(p) container_of((p), struct nv04_gr, base)

struct nv04_gr {
	struct nvkm_gr base;
	struct nv04_gr_chan *chan[16];
	spinlock_t lock;
};

#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)

struct nv04_gr_chan {
	struct nvkm_object object;
	struct nv04_gr *gr;
	int chid;
	u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
};

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

/*
 * Software methods, why they are needed, and how they all work:
 *
 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
 * 3 words long on both. grobj format on NV04 is:
 *
 * word 0:
 *  - bits 0-7: class
 *  - bit 12: color key active
 *  - bit 13: clip rect active
 *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
 *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
 *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
 *            NV03_CONTEXT_SURFACE_DST].
 *  - bits 15-17: 2d operation [aka patch config]
 *  - bit 24: patch valid [enables rendering using this object]
 *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
 * word 1:
 *  - bits 0-1: mono format
 *  - bits 8-13: color format
 *  - bits 16-31: DMA_NOTIFY instance
 * word 2:
 *  - bits 0-15: DMA_A instance
 *  - bits 16-31: DMA_B instance
 *
 * On NV05 it's:
 *
 * word 0:
 *  - bits 0-7: class
 *  - bit 12: color key active
 *  - bit 13: clip rect active
 *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
 *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
 *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
 *            NV03_CONTEXT_SURFACE_DST].
 *  - bits 15-17: 2d operation [aka patch config]
 *  - bits 20-22: dither mode
 *  - bit 24: patch valid [enables rendering using this object]
 *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
 *  - bit 26: surface_src/surface_zeta valid
 *  - bit 27: pattern valid
 *  - bit 28: rop valid
 *  - bit 29: beta1 valid
 *  - bit 30: beta4 valid
 * word 1:
 *  - bits 0-1: mono format
 *  - bits 8-13: color format
 *  - bits 16-31: DMA_NOTIFY instance
 * word 2:
 *  - bits 0-15: DMA_A instance
 *  - bits 16-31: DMA_B instance
 *
 * NV05 will set/unset the relevant valid bits when you poke the relevant
 * object-binding methods with object of the proper type, or with the NULL
 * type. It'll only allow rendering using the grobj if all needed objects
 * are bound. The needed set of objects depends on selected operation: for
 * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
 *
 * NV04 doesn't have these methods implemented at all, and doesn't have the
 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
 * is set. So we have to emulate them in software, internally keeping the
 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
 * but the last word isn't actually used for anything, we abuse it for this
 * purpose.
 *
 * Actually, NV05 can optionally check bit 24 too, but we disable this since
 * there's no use for it.
 *
 * For unknown reasons, NV04 implements surf3d binding in hardware as an
 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
 * methods on the surf3d object, so we have to emulate them too.
 */

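/*
 * As a concrete sketch of the layout above (the variable names here are
 * illustrative only, they don't exist in this driver), the three grobj
 * words would decode as:
 *
 *	u32 class      = word0 & 0xff;		[bits 0-7]
 *	bool swizzled  = word0 & 0x00004000;	[bit 14]
 *	u32 op         = (word0 >> 15) & 0x7;	[bits 15-17]
 *	bool patch_ok  = word0 & 0x01000000;	[bit 24]
 *	u32 mono_fmt   = word1 & 0x3;		[bits 0-1]
 *	u32 color_fmt  = (word1 >> 8) & 0x3f;	[bits 8-13]
 *	u32 dma_notify = word1 >> 16;		[bits 16-31]
 *	u32 dma_a      = word2 & 0xffff;	[bits 0-15]
 *	u32 dma_b      = word2 >> 16;		[bits 16-31]
 *
 * nv04_gr_set_ctx_val() below relies on exactly this decoding of word 0
 * when it recomputes the patch valid bit.
 */

/* Read-modify-write word 0 of the grobj at "inst", and mirror the new
 * value into the live CTX_SWITCH1 register and the CTX_CACHE1 slot of the
 * subchannel that trapped, so the change takes effect immediately.
 */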
static void
nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	u32 tmp;

	tmp  = nvkm_rd32(device, 0x700000 + inst);
	tmp &= ~mask;
	tmp |= value;
	nvkm_wr32(device, 0x700000 + inst, tmp);

	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
}

static void
nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
	int class, op, valid = 1;
	u32 tmp, ctx1;

	ctx1 = nvkm_rd32(device, 0x700000 + inst);
	class = ctx1 & 0xff;
	op = (ctx1 >> 15) & 7;

	tmp = nvkm_rd32(device, 0x70000c + inst);
	tmp &= ~mask;
	tmp |= value;
	nvkm_wr32(device, 0x70000c + inst, tmp);

	/* check for valid surf2d/surf_dst/surf_color */
	if (!(tmp & 0x02000000))
		valid = 0;
	/* check for valid surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
		valid = 0;

	switch (op) {
	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
	case 0:
	case 3:
		break;
	/* ROP_AND: requires pattern and rop */
	case 1:
		if (!(tmp & 0x18000000))
			valid = 0;
		break;
	/* BLEND_AND: requires beta1 */
	case 2:
		if (!(tmp & 0x20000000))
			valid = 0;
		break;
	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
	case 4:
	case 5:
		if (!(tmp & 0x40000000))
			valid = 0;
		break;
	}

	nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
}

static bool
nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
{
	u8 class = nvkm_rd32(device, 0x700000 + inst) & 0x000000ff;
	if (data > 5)
		return false;
	/* Old versions of the objects only accept first three operations. */
	if (data > 2 && class < 0x40)
		return false;
	nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
	/* changing operation changes set of objects needed for validation */
	nv04_gr_set_ctx_val(device, inst, 0, 0);
	return true;
}

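/* Emulated NV04 surf3d clip methods; "data" packs the minimum coordinate
 * in its low 16 bits and a (possibly negative) extent in its high 16 bits.
 * The horizontal and vertical variants differ only in the registers they
 * write.
 */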
static bool
nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
{
	u32 min = data & 0xffff, max;
	u32 w = data >> 16;
	if (min & 0x8000)
		/* too large */
		return false;
	if (w & 0x8000)
		/* yes, it accepts negative for some reason. */
		w |= 0xffff0000;
	max = min + w;
	max &= 0x3ffff;
	nvkm_wr32(device, 0x40053c, min);
	nvkm_wr32(device, 0x400544, max);
	return true;
}

static bool
nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
{
	u32 min = data & 0xffff, max;
	u32 w = data >> 16;
	if (min & 0x8000)
		/* too large */
		return false;
	if (w & 0x8000)
		/* yes, it accepts negative for some reason. */
		w |= 0xffff0000;
	max = min + w;
	max &= 0x3ffff;
	nvkm_wr32(device, 0x400540, min);
	nvkm_wr32(device, 0x400548, max);
	return true;
}

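/* Fetch the class id of an object given its instance address in 16-byte
 * units, as passed in the data word of the bind methods below.
 */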
static u8
nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
{
	return nvkm_rd32(device, 0x700000 + (inst << 4));
}

static bool
nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x42:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x42:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	case 0x52:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
		return true;
	case 0x18:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
		return true;
	case 0x44:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
		return true;
	case 0x43:
		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
		return true;
	case 0x12:
		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
		return true;
	case 0x72:
		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x58:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
		return true;
	case 0x59:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x5a:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
		return true;
	case 0x5b:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x2000, 0);
		return true;
	case 0x19:
		nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x1000, 0);
		return true;
	/* Yes, for some reason even the old versions of objects
	 * accept 0x57 and not 0x17. Consistency be damned.
	 */
	case 0x57:
		nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
		return true;
	}
	return false;
}

static bool
nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_patt; break;
	case 0x0188: func = nv04_gr_mthd_bind_rop; break;
	case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
	case 0x018c: func = nv01_gr_mthd_bind_clip; break;
	case 0x0190: func = nv04_gr_mthd_bind_patt; break;
	case 0x0194: func = nv04_gr_mthd_bind_rop; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
	case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
	case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
	case 0x03e4: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x0304: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x0304: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
	case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
	case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

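/* Top-level software-method dispatch: select the per-class handler based
 * on the class of the object the method was issued on.
 */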
static bool
nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32, u32);
	switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
	case 0x1c ... 0x1e:
		   func = nv01_gr_mthd_prim; break;
	case 0x1f: func = nv01_gr_mthd_blit; break;
	case 0x21: func = nv01_gr_mthd_ifc; break;
	case 0x36: func = nv03_gr_mthd_sifc; break;
	case 0x37: func = nv03_gr_mthd_sifm; break;
	case 0x48: func = nv03_gr_mthd_ttri; break;
	case 0x4a: func = nv04_gr_mthd_gdi; break;
	case 0x4b: func = nv03_gr_mthd_gdi; break;
	case 0x53: func = nv04_gr_mthd_surf3d; break;
	case 0x5c ... 0x5e:
		   func = nv04_gr_mthd_prim; break;
	case 0x5f: func = nv04_gr_mthd_blit; break;
	case 0x60: func = nv04_gr_mthd_iifc; break;
	case 0x61: func = nv04_gr_mthd_ifc; break;
	case 0x76: func = nv04_gr_mthd_sifc; break;
	case 0x77: func = nv04_gr_mthd_sifm; break;
	default:
		return false;
	}
	return func(device, inst, mthd, data);
}

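/* A grobj is allocated as four words: class in word 0 (plus the big-endian
 * flag where needed), the rest zeroed.  Word 3 is unused by the hardware on
 * nv04, and is the slot the software methods above use to keep the
 * NV05-style valid bits.
 */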
static int
nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
#ifdef __BIG_ENDIAN
		nvkm_mo32(*pgpuobj, 0x00, 0x00080000, 0x00080000);
#endif
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv04_gr_object = {
	.bind = nv04_gr_object_bind,
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

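/* Return the channel that currently owns the PGRAPH context, if any. */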
static struct nv04_gr_chan *
nv04_gr_channel(struct nv04_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv04_gr_chan *chan = NULL;
	if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
		int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
		if (chid < ARRAY_SIZE(gr->chan))
			chan = gr->chan[chid];
	}
	return chan;
}

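/* Copy a channel's saved PGRAPH state to the hardware and make it current. */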
static int
nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
{
	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
		nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);

	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
	return 0;
}

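/* Save the hardware PGRAPH state back into the channel, and deactivate the
 * current context.
 */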
static int
nv04_gr_unload_context(struct nv04_gr_chan *chan)
{
	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
		chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);

	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
	return 0;
}

static void
nv04_gr_context_switch(struct nv04_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv04_gr_chan *prev = NULL;
	struct nv04_gr_chan *next = NULL;
	int chid;

	nv04_gr_idle(&gr->base);

	/* If previous context is valid, we need to save it */
	prev = nv04_gr_channel(gr);
	if (prev)
		nv04_gr_unload_context(prev);

	/* load context for next channel */
	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
	next = gr->chan[chid];
	if (next)
		nv04_gr_load_context(next, chid);
}

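/* Look up the saved copy of PGRAPH register "reg" in a channel's context
 * image.
 */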
static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++) {
		if (nv04_gr_ctx_regs[i] == reg)
			return &chan->nv04[i];
	}

	return NULL;
}

static void *
nv04_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv04_gr_chan *chan = nv04_gr_chan(object);
	struct nv04_gr *gr = chan->gr;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = NULL;
	spin_unlock_irqrestore(&gr->lock, flags);
	return chan;
}

static int
nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_gr_chan *chan = nv04_gr_chan(object);
	struct nv04_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (nv04_gr_channel(gr) == chan)
		nv04_gr_unload_context(chan);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

static const struct nvkm_object_func
nv04_gr_chan = {
	.dtor = nv04_gr_chan_dtor,
	.fini = nv04_gr_chan_fini,
};

static int
nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv04_gr *gr = nv04_gr(base);
	struct nv04_gr_chan *chan;
	unsigned long flags;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->id;
	*pobject = &chan->object;

	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = chan;
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

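/* Wait up to two seconds for PGRAPH to go idle; NV40-family boards ignore
 * the SYNC_STALL status bit while waiting.
 */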
bool
nv04_gr_idle(struct nvkm_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = 0xffffffff;

	if (device->card_type == NV_40)
		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
			break;
	) < 0) {
		nvkm_error(subdev, "idle timed out with status %08x\n",
			   nvkm_rd32(device, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}

static const struct nvkm_bitfield
nv04_gr_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{}
};

static const struct nvkm_bitfield
nv04_gr_nstatus[] = {
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};

const struct nvkm_bitfield
nv04_gr_nsource[] = {
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
	{}
};

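/* PGRAPH interrupt handler: ILLEGAL_MTHD notifies are first offered to the
 * software-method emulation above, context-switch requests are serviced
 * here, and anything still unhandled is logged.
 */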
static void
nv04_gr_intr(struct nvkm_gr *base)
{
	struct nv04_gr *gr = nv04_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x0f000000) >> 24;
	u32 subc = (addr & 0x0000e000) >> 13;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
	u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	struct nv04_gr_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	chan = gr->chan[chid];

	if (stat & NV_PGRAPH_INTR_NOTIFY) {
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			if (!nv04_gr_mthd(device, inst, mthd, data))
				show &= ~NV_PGRAPH_INTR_NOTIFY;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv04_gr_context_switch(gr);
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   chan ? chan->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->lock, flags);
}

static int
nv04_gr_init(struct nvkm_gr *base)
{
	struct nv04_gr *gr = nv04_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;

	/* Enable PGRAPH interrupts */
	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
	nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/*1231C000 blob, 001 haiku*/
	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/*0x72111100 blob , 01 haiku*/
	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
	/*haiku same*/

	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
	/*haiku and blob 10d4*/

	nvkm_wr32(device, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);

	/* These don't belong here, they're part of a per-channel context */
	nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
	return 0;
}

static const struct nvkm_gr_func
nv04_gr = {
	.init = nv04_gr_init,
	.intr = nv04_gr_intr,
	.chan_new = nv04_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
		{ -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
		{ -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
		{ -1, -1, 0x001c, &nv04_gr_object }, /* line */
		{ -1, -1, 0x001d, &nv04_gr_object }, /* tri */
		{ -1, -1, 0x001e, &nv04_gr_object }, /* rect */
		{ -1, -1, 0x001f, &nv04_gr_object },
		{ -1, -1, 0x0021, &nv04_gr_object },
		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
		{ -1, -1, 0x0036, &nv04_gr_object },
		{ -1, -1, 0x0037, &nv04_gr_object },
		{ -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
		{ -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
		{ -1, -1, 0x0048, &nv04_gr_object },
		{ -1, -1, 0x004a, &nv04_gr_object },
		{ -1, -1, 0x004b, &nv04_gr_object },
		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
		{ -1, -1, 0x0053, &nv04_gr_object },
		{ -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
		{ -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
		{ -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
		{ -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
		{ -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
		{ -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
		{ -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
		{ -1, -1, 0x005c, &nv04_gr_object }, /* line */
		{ -1, -1, 0x005d, &nv04_gr_object }, /* tri */
		{ -1, -1, 0x005e, &nv04_gr_object }, /* rect */
		{ -1, -1, 0x005f, &nv04_gr_object },
		{ -1, -1, 0x0060, &nv04_gr_object },
		{ -1, -1, 0x0061, &nv04_gr_object },
		{ -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
		{ -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
		{ -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
		{ -1, -1, 0x0076, &nv04_gr_object },
		{ -1, -1, 0x0077, &nv04_gr_object },
		{}
	}
};

int
nv04_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	struct nv04_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	spin_lock_init(&gr->lock);
	*pgr = &gr->base;

	return nvkm_gr_ctor(&nv04_gr, device, type, inst, true, &gr->base);
}