1 /*
2  * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  */
24 #include <engine/gr.h>
25 #include "regs.h"
26 
27 #include <core/client.h>
28 #include <core/handle.h>
29 #include <engine/fifo.h>
30 #include <subdev/fb.h>
31 
/*
 * Shadow copy of the PGRAPH transform/lighting "pipe" state, saved and
 * restored across channel switches.  Each array mirrors a block of pipe
 * words; the name encodes the NV10_PGRAPH_PIPE_ADDRESS the block starts
 * at and the array length encodes the block's byte size (e.g.
 * pipe_0x0200 holds 0x0c0 bytes read back from pipe address 0x0200).
 */
struct pipe_state {
	u32 pipe_0x0000[0x040/4];
	u32 pipe_0x0040[0x010/4];
	u32 pipe_0x0200[0x0c0/4];
	u32 pipe_0x4400[0x080/4];
	u32 pipe_0x6400[0x3b0/4];
	u32 pipe_0x6800[0x2f0/4];
	u32 pipe_0x6c00[0x030/4];
	u32 pipe_0x7000[0x130/4];
	u32 pipe_0x7400[0x0c0/4];
	u32 pipe_0x7800[0x0c0/4];
};
44 
/*
 * PGRAPH registers saved/restored per channel on NV10+.  The order is
 * significant: nv10_gr_load_context()/nv10_gr_unload_context() copy
 * chan->nv10[i] to/from these MMIO addresses by array index, and
 * nv10_gr_ctx_regs_find_offset() maps a register back to its slot.
 * Do not reorder or insert entries without updating saved contexts.
 */
static int nv10_gr_ctx_regs[] = {
	NV10_PGRAPH_CTX_SWITCH(0),
	NV10_PGRAPH_CTX_SWITCH(1),
	NV10_PGRAPH_CTX_SWITCH(2),
	NV10_PGRAPH_CTX_SWITCH(3),
	NV10_PGRAPH_CTX_SWITCH(4),
	NV10_PGRAPH_CTX_CACHE(0, 0),
	NV10_PGRAPH_CTX_CACHE(0, 1),
	NV10_PGRAPH_CTX_CACHE(0, 2),
	NV10_PGRAPH_CTX_CACHE(0, 3),
	NV10_PGRAPH_CTX_CACHE(0, 4),
	NV10_PGRAPH_CTX_CACHE(1, 0),
	NV10_PGRAPH_CTX_CACHE(1, 1),
	NV10_PGRAPH_CTX_CACHE(1, 2),
	NV10_PGRAPH_CTX_CACHE(1, 3),
	NV10_PGRAPH_CTX_CACHE(1, 4),
	NV10_PGRAPH_CTX_CACHE(2, 0),
	NV10_PGRAPH_CTX_CACHE(2, 1),
	NV10_PGRAPH_CTX_CACHE(2, 2),
	NV10_PGRAPH_CTX_CACHE(2, 3),
	NV10_PGRAPH_CTX_CACHE(2, 4),
	NV10_PGRAPH_CTX_CACHE(3, 0),
	NV10_PGRAPH_CTX_CACHE(3, 1),
	NV10_PGRAPH_CTX_CACHE(3, 2),
	NV10_PGRAPH_CTX_CACHE(3, 3),
	NV10_PGRAPH_CTX_CACHE(3, 4),
	NV10_PGRAPH_CTX_CACHE(4, 0),
	NV10_PGRAPH_CTX_CACHE(4, 1),
	NV10_PGRAPH_CTX_CACHE(4, 2),
	NV10_PGRAPH_CTX_CACHE(4, 3),
	NV10_PGRAPH_CTX_CACHE(4, 4),
	NV10_PGRAPH_CTX_CACHE(5, 0),
	NV10_PGRAPH_CTX_CACHE(5, 1),
	NV10_PGRAPH_CTX_CACHE(5, 2),
	NV10_PGRAPH_CTX_CACHE(5, 3),
	NV10_PGRAPH_CTX_CACHE(5, 4),
	NV10_PGRAPH_CTX_CACHE(6, 0),
	NV10_PGRAPH_CTX_CACHE(6, 1),
	NV10_PGRAPH_CTX_CACHE(6, 2),
	NV10_PGRAPH_CTX_CACHE(6, 3),
	NV10_PGRAPH_CTX_CACHE(6, 4),
	NV10_PGRAPH_CTX_CACHE(7, 0),
	NV10_PGRAPH_CTX_CACHE(7, 1),
	NV10_PGRAPH_CTX_CACHE(7, 2),
	NV10_PGRAPH_CTX_CACHE(7, 3),
	NV10_PGRAPH_CTX_CACHE(7, 4),
	NV10_PGRAPH_CTX_USER,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV10_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV10_PGRAPH_SURFACE,
	NV10_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV10_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
	0x00400904,
	0x00400908,
	0x0040090c,
	0x00400910,
	0x00400914,
	0x00400918,
	0x0040091c,
	0x00400920,
	0x00400924,
	0x00400928,
	0x0040092c,
	0x00400930,
	0x00400934,
	0x00400938,
	0x0040093c,
	0x00400940,
	0x00400944,
	0x00400948,
	0x0040094c,
	0x00400950,
	0x00400954,
	0x00400958,
	0x0040095c,
	0x00400960,
	0x00400964,
	0x00400968,
	0x0040096c,
	0x00400970,
	0x00400974,
	0x00400978,
	0x0040097c,
	0x00400980,
	0x00400984,
	0x00400988,
	0x0040098c,
	0x00400990,
	0x00400994,
	0x00400998,
	0x0040099c,
	0x004009a0,
	0x004009a4,
	0x004009a8,
	0x004009ac,
	0x004009b0,
	0x004009b4,
	0x004009b8,
	0x004009bc,
	0x004009c0,
	0x004009c4,
	0x004009c8,
	0x004009cc,
	0x004009d0,
	0x004009d4,
	0x004009d8,
	0x004009dc,
	0x004009e0,
	0x004009e4,
	0x004009e8,
	0x004009ec,
	0x004009f0,
	0x004009f4,
	0x004009f8,
	0x004009fc,
	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV03_PGRAPH_MONO_COLOR0,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	0x00400e70,
	0x00400e74,
	0x00400e78,
	0x00400e7c,
	0x00400e80,
	0x00400e84,
	0x00400e88,
	0x00400e8c,
	/* NOTE: 0x400ea0-0x400ea8 intentionally precede 0x400e90-0x400e9c
	 * here; do not "fix" the ordering. */
	0x00400ea0,
	0x00400ea4,
	0x00400ea8,
	0x00400e90,
	0x00400e94,
	0x00400e98,
	0x00400e9c,
	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
	0x00400f04,
	0x00400f24,
	0x00400f08,
	0x00400f28,
	0x00400f0c,
	0x00400f2c,
	0x00400f10,
	0x00400f30,
	0x00400f14,
	0x00400f34,
	0x00400f18,
	0x00400f38,
	0x00400f1c,
	0x00400f3c,
	NV10_PGRAPH_XFMODE0,
	NV10_PGRAPH_XFMODE1,
	NV10_PGRAPH_GLOBALSTATE0,
	NV10_PGRAPH_GLOBALSTATE1,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	NV03_PGRAPH_ABS_UCLIP_XMIN,
	NV03_PGRAPH_ABS_UCLIP_XMAX,
	NV03_PGRAPH_ABS_UCLIP_YMIN,
	NV03_PGRAPH_ABS_UCLIP_YMAX,
	0x00400550,
	0x00400558,
	0x00400554,
	0x0040055c,
	NV03_PGRAPH_ABS_UCLIPA_XMIN,
	NV03_PGRAPH_ABS_UCLIPA_XMAX,
	NV03_PGRAPH_ABS_UCLIPA_YMIN,
	NV03_PGRAPH_ABS_UCLIPA_YMAX,
	NV03_PGRAPH_ABS_ICLIP_XMAX,
	NV03_PGRAPH_ABS_ICLIP_YMAX,
	NV03_PGRAPH_XY_LOGIC_MISC0,
	NV03_PGRAPH_XY_LOGIC_MISC1,
	NV03_PGRAPH_XY_LOGIC_MISC2,
	NV03_PGRAPH_XY_LOGIC_MISC3,
	NV03_PGRAPH_CLIPX_0,
	NV03_PGRAPH_CLIPX_1,
	NV03_PGRAPH_CLIPY_0,
	NV03_PGRAPH_CLIPY_1,
	NV10_PGRAPH_COMBINER0_IN_ALPHA,
	NV10_PGRAPH_COMBINER1_IN_ALPHA,
	NV10_PGRAPH_COMBINER0_IN_RGB,
	NV10_PGRAPH_COMBINER1_IN_RGB,
	NV10_PGRAPH_COMBINER_COLOR0,
	NV10_PGRAPH_COMBINER_COLOR1,
	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
	NV10_PGRAPH_COMBINER0_OUT_RGB,
	NV10_PGRAPH_COMBINER1_OUT_RGB,
	NV10_PGRAPH_COMBINER_FINAL0,
	NV10_PGRAPH_COMBINER_FINAL1,
	0x00400e00,
	0x00400e04,
	0x00400e08,
	0x00400e0c,
	0x00400e10,
	0x00400e14,
	0x00400e18,
	0x00400e1c,
	0x00400e20,
	0x00400e24,
	0x00400e28,
	0x00400e2c,
	0x00400e30,
	0x00400e34,
	0x00400e38,
	0x00400e3c,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV10_PGRAPH_DIMX_TEXTURE,
	NV10_PGRAPH_WDIMX_TEXTURE,
	NV10_PGRAPH_DVD_COLORFMT,
	NV10_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	NV03_PGRAPH_X_MISC,
	NV03_PGRAPH_Y_MISC,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
};
366 
/*
 * Extra context registers saved/restored only on NV11+ chipsets with
 * chipset >= 0x17 (see the guards in nv10_gr_load_context() and
 * nv10_gr_unload_context()).  Indexed the same way as nv10_gr_ctx_regs
 * via chan->nv17[].
 */
static int nv17_gr_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};
387 
/* NV10 PGRAPH engine state. */
struct nv10_gr {
	struct nvkm_gr base;
	struct nv10_gr_chan *chan[32];	/* per-channel contexts, indexed by chid */
	spinlock_t lock;		/* protects chan[] and context switching */
};
393 
/* Per-channel software copy of the PGRAPH context. */
struct nv10_gr_chan {
	struct nvkm_object base;
	int chid;					/* fifo channel id */
	int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];		/* saved nv10 ctx regs */
	int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];		/* saved nv17 extras */
	struct pipe_state pipe_state;			/* saved pipe state */
	u32 lma_window[4];	/* args of celsius mthds 0x1638-0x1644 */
};
402 
403 
404 static inline struct nv10_gr *
405 nv10_gr(struct nv10_gr_chan *chan)
406 {
407 	return (void *)nv_object(chan)->engine;
408 }
409 
410 /*******************************************************************************
411  * Graphics object classes
412  ******************************************************************************/
413 
/*
 * Read ARRAY_SIZE(state) pipe words starting at pipe address 'addr'
 * into 'state'.  One address write followed by repeated DATA reads
 * fills consecutive entries (the hardware presumably auto-advances the
 * pipe address on each access).  Requires a 'device' variable in scope
 * at the expansion site; 'gr' is currently unused.  'state' must be a
 * real array (ARRAY_SIZE), not a pointer.
 */
#define PIPE_SAVE(gr, state, addr)					\
	do {								\
		int __i;						\
		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
	} while (0)
421 
/*
 * Write ARRAY_SIZE(state) pipe words from 'state' to the hardware,
 * starting at pipe address 'addr'.  Counterpart of PIPE_SAVE; same
 * expansion-site requirements ('device' in scope, 'state' an array).
 */
#define PIPE_RESTORE(gr, state, addr)					\
	do {								\
		int __i;						\
		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
	} while (0)
429 
/* Object classes exposed on NV10: class 0x0056 celsius (3D). */
static struct nvkm_oclass
nv10_gr_sclass[] = {
	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
	{ 0x0030, &nv04_gr_ofuncs }, /* null */
	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
	{ 0x0056, &nv04_gr_ofuncs }, /* celsius */
	{},
};
452 
/* Object classes exposed on NV15: same as NV10 except celsius is 0x0096. */
static struct nvkm_oclass
nv15_gr_sclass[] = {
	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
	{ 0x0030, &nv04_gr_ofuncs }, /* null */
	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
	{ 0x0096, &nv04_gr_ofuncs }, /* celsius */
	{},
};
475 
/*
 * Celsius software methods 0x1638-0x1644: accumulate the four LMA
 * window parameters into chan->lma_window[]; when the final word
 * (mthd 0x1644) arrives, program the window into pipe address 0x6790,
 * saving and restoring the surrounding pipe and XFMODE state so the
 * channel's visible context is unchanged.  'size' is unused; 'args'
 * is assumed to hold at least one u32 (TODO confirm caller contract).
 */
static int
nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
			void *args, u32 size)
{
	struct nv10_gr_chan *chan = (void *)object->parent;
	struct nv10_gr *gr = nv10_gr(chan);
	struct pipe_state *pipe = &chan->pipe_state;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
	u32 xfmode0, xfmode1;
	u32 data = *(u32 *)args;
	int i;

	/* mthds 0x1638/0x163c/0x1640/0x1644 map to indices 0..3 */
	chan->lma_window[(mthd - 0x1638) / 4] = data;

	/* only act once the last window word has been written */
	if (mthd != 0x1644)
		return 0;

	nv04_gr_idle(gr);

	/* save pipe state that the update below will clobber */
	PIPE_SAVE(gr, pipe_0x0040, 0x0040);
	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);

	/* write the collected LMA window into the pipe */
	PIPE_RESTORE(gr, chan->lma_window, 0x6790);

	nv04_gr_idle(gr);

	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);

	PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(gr, pipe_0x64c0, 0x64c0);
	PIPE_SAVE(gr, pipe_0x6ab0, 0x6ab0);
	PIPE_SAVE(gr, pipe_0x6a80, 0x6a80);

	nv04_gr_idle(gr);

	/* same "neutral" pipe setup sequence as nv10_gr_load_pipe() */
	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);

	nv04_gr_idle(gr);

	PIPE_RESTORE(gr, pipe_0x0040, 0x0040);

	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);

	PIPE_RESTORE(gr, pipe_0x64c0, 0x64c0);
	PIPE_RESTORE(gr, pipe_0x6ab0, 0x6ab0);
	PIPE_RESTORE(gr, pipe_0x6a80, 0x6a80);
	PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv04_gr_idle(gr);

	return 0;
}
553 
/*
 * Celsius software method 0x1658: enable LMA by setting an
 * undocumented PGRAPH_DEBUG_4 bit and a bit in register 0x4006b0.
 * 'mthd', 'args' and 'size' are unused.
 */
static int
nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd,
			void *args, u32 size)
{
	struct nv10_gr_chan *chan = (void *)object->parent;
	struct nv10_gr *gr = nv10_gr(chan);
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nv04_gr_idle(gr);

	nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
	nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
	return 0;
}
568 
/*
 * Software methods for the NV17 celsius class (0x0099).  The
 * identifier keeps the historical "celcius" misspelling.
 */
static struct nvkm_omthds
nv17_celcius_omthds[] = {
	{ 0x1638, 0x1638, nv17_gr_mthd_lma_window }, /* LMA window word 0 */
	{ 0x163c, 0x163c, nv17_gr_mthd_lma_window }, /* LMA window word 1 */
	{ 0x1640, 0x1640, nv17_gr_mthd_lma_window }, /* LMA window word 2 */
	{ 0x1644, 0x1644, nv17_gr_mthd_lma_window }, /* LMA window word 3 (trigger) */
	{ 0x1658, 0x1658, nv17_gr_mthd_lma_enable },
	{}
};
578 
/* Object classes exposed on NV17: celsius is 0x0099 with LMA methods. */
static struct nvkm_oclass
nv17_gr_sclass[] = {
	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
	{ 0x0030, &nv04_gr_ofuncs }, /* null */
	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
	{ 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds }, /* celsius */
	{},
};
601 
602 /*******************************************************************************
603  * PGRAPH context
604  ******************************************************************************/
605 
606 static struct nv10_gr_chan *
607 nv10_gr_channel(struct nv10_gr *gr)
608 {
609 	struct nvkm_device *device = gr->base.engine.subdev.device;
610 	struct nv10_gr_chan *chan = NULL;
611 	if (nvkm_rd32(device, 0x400144) & 0x00010000) {
612 		int chid = nvkm_rd32(device, 0x400148) >> 24;
613 		if (chid < ARRAY_SIZE(gr->chan))
614 			chan = gr->chan[chid];
615 	}
616 	return chan;
617 }
618 
/*
 * Read the current hardware pipe state back into chan->pipe_state so
 * nv10_gr_load_pipe() can replay it later.  The save order (ending
 * with 0x0040 and 0x0000) mirrors the restore order and should not be
 * changed.
 */
static void
nv10_gr_save_pipe(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = nv10_gr(chan);
	struct pipe_state *pipe = &chan->pipe_state;
	struct nvkm_device *device = gr->base.engine.subdev.device;

	PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
	PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
	PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
	PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
	PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
	PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
	PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
	PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
}
637 
/*
 * Replay the pipe state saved in chan->pipe_state into the hardware.
 * XFMODE is forced to a neutral value while the 0x0200 block is loaded
 * and restored afterwards; the restore order of the remaining blocks
 * is significant.
 */
static void
nv10_gr_load_pipe(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = nv10_gr(chan);
	struct pipe_state *pipe = &chan->pipe_state;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 xfmode0, xfmode1;
	int i;

	nv04_gr_idle(gr);
	/* XXX check haiku comments */
	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);


	PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
	nv04_gr_idle(gr);

	/* restore XFMODE */
	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
	nv04_gr_idle(gr);
}
688 
/*
 * Fill chan->pipe_state with the default (power-on) pipe values for a
 * freshly created channel.  Each PIPE_INIT/PIPE_INIT_END pair must
 * write exactly ARRAY_SIZE(pipe_0xNNNN) words; PIPE_INIT_END logs an
 * error if the count is off.
 */
static void
nv10_gr_create_pipe(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = nv10_gr(chan);
	struct pipe_state *pipe_state = &chan->pipe_state;
	u32 *pipe_state_addr;
	int i;
/* Point the write cursor at the start of the named pipe block. */
#define PIPE_INIT(addr) \
	do { \
		pipe_state_addr = pipe_state->pipe_##addr; \
	} while (0)
/* Verify the cursor landed exactly at the end of the block. */
#define PIPE_INIT_END(addr) \
	do { \
		u32 *__end_addr = pipe_state->pipe_##addr + \
				ARRAY_SIZE(pipe_state->pipe_##addr); \
		if (pipe_state_addr != __end_addr) \
			nv_error(gr, "incomplete pipe init for 0x%x :  %p/%p\n", \
				addr, pipe_state_addr, __end_addr); \
	} while (0)
#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value

	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	/* 0x6400: 211 zeros + 25 explicit values = 0x3b0/4 words */
	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	/* 0x7000: 12 zeros, then 8 groups of (0x7149f2ca + 3 zeros)
	 * interleaved, then 35 trailing zeros = 0x130/4 words */
	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
843 
844 static int
845 nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
846 {
847 	int i;
848 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
849 		if (nv10_gr_ctx_regs[i] == reg)
850 			return i;
851 	}
852 	nv_error(gr, "unknow offset nv10_ctx_regs %d\n", reg);
853 	return -1;
854 }
855 
856 static int
857 nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
858 {
859 	int i;
860 	for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
861 		if (nv17_gr_ctx_regs[i] == reg)
862 			return i;
863 	}
864 	nv_error(gr, "unknow offset nv17_ctx_regs %d\n", reg);
865 	return -1;
866 }
867 
/*
 * Restore the hidden vertex-buffer DMA state by injecting the
 * NV10TCL_DMA_VTXBUF method (0x18c) with object 'inst' through the
 * PGRAPH FIFO interface, then putting the FIFO and ctx-switch
 * registers back exactly as they were.  No-op if 'inst' is zero or no
 * celsius object is bound on any subchannel.
 */
static void
nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
{
	struct nv10_gr *gr = nv10_gr(chan);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
	u32 ctx_user, ctx_switch[5];
	int i, subchan = -1;

	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
	 * that cannot be restored via MMIO. Do it through the FIFO
	 * instead.
	 */

	/* Look for a celsius object */
	for (i = 0; i < 8; i++) {
		int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;

		/* 0x56/0x96/0x99: celsius class ids for nv10/nv15/nv17 */
		if (class == 0x56 || class == 0x96 || class == 0x99) {
			subchan = i;
			break;
		}
	}

	if (subchan < 0 || !inst)
		return;

	/* Save the current ctx object */
	ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
	for (i = 0; i < 5; i++)
		ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));

	/* Save the FIFO state */
	st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
	st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
	st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
	fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);

	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);

	/* Switch to the celsius subchannel */
	for (i = 0; i < 5; i++)
		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
			nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);

	/* Inject NV10TCL_DMA_VTXBUF */
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
	nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
	/* pulse the FIFO enable bit to make PGRAPH consume the method */
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Restore the FIFO state */
	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);

	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);

	/* Restore the current ctx object */
	for (i = 0; i < 5; i++)
		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
	nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
}
938 
/*
 * Load channel 'chid''s saved context into PGRAPH: MMIO context
 * registers, NV17 extras where applicable, pipe state, and the hidden
 * vertex-buffer DMA object.  Always returns 0.
 */
static int
nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
{
	struct nv10_gr *gr = nv10_gr(chan);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 inst;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
		nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);

	if (nv_device(gr)->card_type >= NV_11 &&
	    nv_device(gr)->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
			nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
	}

	nv10_gr_load_pipe(chan);

	/* DMA_VTXBUF instance lives in the low half of GLOBALSTATE1 */
	inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
	nv10_gr_load_dma_vtxbuf(chan, chid, inst);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
	return 0;
}
966 
/*
 * Save the active PGRAPH context into 'chan' (MMIO context registers,
 * NV17 extras where applicable, pipe state) and mark the hardware as
 * having no valid channel (chid 0x1f).  Always returns 0.
 */
static int
nv10_gr_unload_context(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = nv10_gr(chan);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
		chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);

	if (nv_device(gr)->card_type >= NV_11 &&
	    nv_device(gr)->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
			chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
	}

	nv10_gr_save_pipe(chan);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
	return 0;
}
989 
/*
 * Perform a PGRAPH context switch: save the currently active channel's
 * state (if any), then load the context of the channel id latched in
 * NV04_PGRAPH_TRAPPED_ADDR.  Runs under gr->lock with interrupts
 * disabled.
 */
static void
nv10_gr_context_switch(struct nv10_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv10_gr_chan *prev = NULL;
	struct nv10_gr_chan *next = NULL;
	unsigned long flags;
	int chid;

	spin_lock_irqsave(&gr->lock, flags);
	nv04_gr_idle(gr);

	/* If previous context is valid, we need to save it */
	prev = nv10_gr_channel(gr);
	if (prev)
		nv10_gr_unload_context(prev);

	/* load context for next channel */
	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	next = gr->chan[chid];
	if (next)
		nv10_gr_load_context(next, chid);

	spin_unlock_irqrestore(&gr->lock, flags);
}
1015 
/*
 * Store 'val' into the software context slot (chan->nv10[]) shadowing
 * PGRAPH register 'reg'.  Expects 'gr' and 'chan' in scope at the
 * expansion site.  Fix: the lookup returns -1 on failure and 0 is a
 * valid slot (NV10_PGRAPH_CTX_SWITCH(0)), so test '>= 0', not '> 0',
 * which silently dropped writes to the first register.
 */
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
	if (offset >= 0) \
		chan->nv10[offset] = val; \
	} while (0)
1021 
/*
 * Store 'val' into the NV17 software context slot (chan->nv17[])
 * shadowing PGRAPH register 'reg'.  Expects 'gr' and 'chan' in scope
 * at the expansion site.  Fix: the lookup returns -1 on failure and 0
 * is a valid slot (NV10_PGRAPH_DEBUG_4), so test '>= 0', not '> 0',
 * which silently dropped writes to the first register.
 */
#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
	if (offset >= 0) \
		chan->nv17[offset] = val; \
	} while (0)
1027 
1028 static int
1029 nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1030 		     struct nvkm_oclass *oclass, void *data, u32 size,
1031 		     struct nvkm_object **pobject)
1032 {
1033 	struct nvkm_fifo_chan *fifo = (void *)parent;
1034 	struct nv10_gr *gr = (void *)engine;
1035 	struct nv10_gr_chan *chan;
1036 	struct nvkm_device *device = gr->base.engine.subdev.device;
1037 	unsigned long flags;
1038 	int ret;
1039 
1040 	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
1041 	*pobject = nv_object(chan);
1042 	if (ret)
1043 		return ret;
1044 
1045 	spin_lock_irqsave(&gr->lock, flags);
1046 	if (gr->chan[fifo->chid]) {
1047 		*pobject = nv_object(gr->chan[fifo->chid]);
1048 		atomic_inc(&(*pobject)->refcount);
1049 		spin_unlock_irqrestore(&gr->lock, flags);
1050 		nvkm_object_destroy(&chan->base);
1051 		return 1;
1052 	}
1053 
1054 	NV_WRITE_CTX(0x00400e88, 0x08000000);
1055 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
1056 	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
1057 	NV_WRITE_CTX(0x00400e10, 0x00001000);
1058 	NV_WRITE_CTX(0x00400e14, 0x00001000);
1059 	NV_WRITE_CTX(0x00400e30, 0x00080008);
1060 	NV_WRITE_CTX(0x00400e34, 0x00080008);
1061 	if (nv_device(gr)->card_type >= NV_11 &&
1062 	    nv_device(gr)->chipset >= 0x17) {
1063 		/* is it really needed ??? */
1064 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
1065 					nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
1066 		NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
1067 		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
1068 		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
1069 		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
1070 		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
1071 	}
1072 	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
1073 
1074 	nv10_gr_create_pipe(chan);
1075 
1076 	gr->chan[fifo->chid] = chan;
1077 	chan->chid = fifo->chid;
1078 	spin_unlock_irqrestore(&gr->lock, flags);
1079 	return 0;
1080 }
1081 
1082 static void
1083 nv10_gr_context_dtor(struct nvkm_object *object)
1084 {
1085 	struct nv10_gr *gr = (void *)object->engine;
1086 	struct nv10_gr_chan *chan = (void *)object;
1087 	unsigned long flags;
1088 
1089 	spin_lock_irqsave(&gr->lock, flags);
1090 	gr->chan[chan->chid] = NULL;
1091 	spin_unlock_irqrestore(&gr->lock, flags);
1092 
1093 	nvkm_object_destroy(&chan->base);
1094 }
1095 
1096 static int
1097 nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
1098 {
1099 	struct nv10_gr *gr = (void *)object->engine;
1100 	struct nv10_gr_chan *chan = (void *)object;
1101 	struct nvkm_device *device = gr->base.engine.subdev.device;
1102 	unsigned long flags;
1103 
1104 	spin_lock_irqsave(&gr->lock, flags);
1105 	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1106 	if (nv10_gr_channel(gr) == chan)
1107 		nv10_gr_unload_context(chan);
1108 	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1109 	spin_unlock_irqrestore(&gr->lock, flags);
1110 
1111 	return nvkm_object_fini(&chan->base, suspend);
1112 }
1113 
/* Per-channel context class for NV10 PGRAPH; wired into the engine in
 * nv10_gr_ctor(). */
static struct nvkm_oclass
nv10_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x10),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv10_gr_context_ctor,
		.dtor = nv10_gr_context_dtor,
		.init = nvkm_object_init,
		.fini = nv10_gr_context_fini,
	},
};
1124 
1125 /*******************************************************************************
1126  * PGRAPH engine/subdev functions
1127  ******************************************************************************/
1128 
1129 static void
1130 nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
1131 {
1132 	struct nv10_gr *gr = (void *)engine;
1133 	struct nvkm_device *device = gr->base.engine.subdev.device;
1134 	struct nvkm_fifo *fifo = device->fifo;
1135 	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
1136 	unsigned long flags;
1137 
1138 	fifo->pause(fifo, &flags);
1139 	nv04_gr_idle(gr);
1140 
1141 	nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
1142 	nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
1143 	nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
1144 
1145 	fifo->start(fifo, &flags);
1146 }
1147 
/* Decode table for NV03_PGRAPH_INTR bits, used when logging unhandled
 * interrupts in nv10_gr_intr(). */
const struct nvkm_bitfield nv10_gr_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
	{}
};
1153 
/* Decode table for NV03_PGRAPH_NSTATUS bits reported alongside PGRAPH
 * error interrupts. */
const struct nvkm_bitfield nv10_gr_nstatus[] = {
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};
1161 
/* PGRAPH interrupt handler: decode the trapped method, give ILLEGAL_MTHD
 * errors to the object implementing the class (software methods), service
 * context-switch requests, acknowledge everything, and log whatever was
 * left unhandled. */
static void
nv10_gr_intr(struct nvkm_subdev *subdev)
{
	struct nv10_gr *gr = (void *)subdev;
	struct nv10_gr_chan *chan = NULL;
	struct nvkm_namedb *namedb = NULL;
	struct nvkm_handle *handle = NULL;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x01f00000) >> 20;	/* 5-bit channel id */
	u32 subc = (addr & 0x00070000) >> 16;	/* subchannel */
	u32 mthd = (addr & 0x00001ffc);		/* method offset */
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
	u32 show = stat;	/* bits still unhandled; reported below */
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	chan = gr->chan[chid];
	if (chan)
		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
	spin_unlock_irqrestore(&gr->lock, flags);

	if (stat & NV_PGRAPH_INTR_ERROR) {
		/* An "illegal method" may really be a software method:
		 * try to dispatch it to the class object before treating
		 * it as an error. */
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			handle = nvkm_namedb_get_class(namedb, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		/* Ack the switch request first, then perform it. */
		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv10_gr_context_switch(gr);
	}

	/* Acknowledge remaining interrupts and re-enable PGRAPH FIFO. */
	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		/* The empty nv_error() emits just the log prefix; the
		 * bitfield decoders continue the line via pr_cont(). */
		nv_error(gr, "%s", "");
		nvkm_bitfield_print(nv10_gr_intr_name, show);
		pr_cont(" nsource:");
		nvkm_bitfield_print(nv04_gr_nsource, nsource);
		pr_cont(" nstatus:");
		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
		pr_cont("\n");
		nv_error(gr,
			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
			 chid, nvkm_client_name(chan), subc, class, mthd,
			 data);
	}

	nvkm_namedb_put(handle);
}
1222 
1223 static int
1224 nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
1225 	     struct nvkm_oclass *oclass, void *data, u32 size,
1226 	     struct nvkm_object **pobject)
1227 {
1228 	struct nv10_gr *gr;
1229 	int ret;
1230 
1231 	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
1232 	*pobject = nv_object(gr);
1233 	if (ret)
1234 		return ret;
1235 
1236 	nv_subdev(gr)->unit = 0x00001000;
1237 	nv_subdev(gr)->intr = nv10_gr_intr;
1238 	nv_engine(gr)->cclass = &nv10_gr_cclass;
1239 
1240 	if (nv_device(gr)->chipset <= 0x10)
1241 		nv_engine(gr)->sclass = nv10_gr_sclass;
1242 	else
1243 	if (nv_device(gr)->chipset <  0x17 ||
1244 	    nv_device(gr)->card_type < NV_11)
1245 		nv_engine(gr)->sclass = nv15_gr_sclass;
1246 	else
1247 		nv_engine(gr)->sclass = nv17_gr_sclass;
1248 
1249 	nv_engine(gr)->tile_prog = nv10_gr_tile_prog;
1250 	spin_lock_init(&gr->lock);
1251 	return 0;
1252 }
1253 
1254 static void
1255 nv10_gr_dtor(struct nvkm_object *object)
1256 {
1257 	struct nv10_gr *gr = (void *)object;
1258 	nvkm_gr_destroy(&gr->base);
1259 }
1260 
/* Bring PGRAPH out of reset: clear and enable interrupts, program the
 * DEBUG registers, reprogram all tiling regions, and zero the context
 * state so no channel appears resident. */
static int
nv10_gr_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv10_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	int ret, i;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	/* Ack any pending interrupts, then enable all sources. */
	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	/* Pulse DEBUG_0 (all bits set then cleared), then program the
	 * remaining DEBUG registers with magic values inherited from the
	 * original reverse-engineered driver. */
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));

	if (nv_device(gr)->card_type >= NV_11 &&
	    nv_device(gr)->chipset >= 0x17) {
		/* NV17-class only: extra unnamed registers; values are
		 * magic, taken from hardware traces. */
		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
		nvkm_wr32(device, 0x400838, 0x002f8684);
		nvkm_wr32(device, 0x40083c, 0x00115f3f);
		nvkm_wr32(device, 0x4006b0, 0x40000020);
	} else {
		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* Clear the context-switch state so no channel is resident. */
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);

	/* Mark the "no channel" chid (0x1f) as current in CTX_USER. */
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
	return 0;
}
1311 
1312 static int
1313 nv10_gr_fini(struct nvkm_object *object, bool suspend)
1314 {
1315 	struct nv10_gr *gr = (void *)object;
1316 	return nvkm_gr_fini(&gr->base, suspend);
1317 }
1318 
/* Public engine class for NV10-family PGRAPH, referenced by the device
 * probe tables. */
struct nvkm_oclass
nv10_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x10),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv10_gr_ctor,
		.dtor = nv10_gr_dtor,
		.init = nv10_gr_init,
		.fini = nv10_gr_fini,
	},
};
1329