/*
 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "nv10.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/fb.h>

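/* Per-channel snapshot of PGRAPH's internal 3D pipeline state.  Each array
 * shadows one PIPE RAM range and is named after its base address, with its
 * size given in bytes of PIPE space divided into u32 words.  The ranges are
 * transferred through the NV10_PGRAPH_PIPE_ADDRESS/PIPE_DATA window by the
 * PIPE_SAVE()/PIPE_RESTORE() macros below.
 */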
struct pipe_state {
	u32 pipe_0x0000[0x040/4];
	u32 pipe_0x0040[0x010/4];
	u32 pipe_0x0200[0x0c0/4];
	u32 pipe_0x4400[0x080/4];
	u32 pipe_0x6400[0x3b0/4];
	u32 pipe_0x6800[0x2f0/4];
	u32 pipe_0x6c00[0x030/4];
	u32 pipe_0x7000[0x130/4];
	u32 pipe_0x7400[0x0c0/4];
	u32 pipe_0x7800[0x0c0/4];
};

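/* MMIO offsets making up the per-channel PGRAPH context: each register
 * listed here is read into chan->nv10[] when a channel is unloaded and
 * written back, in the same order, when it is loaded again.
 */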
static int nv10_gr_ctx_regs[] = {
	NV10_PGRAPH_CTX_SWITCH(0),
	NV10_PGRAPH_CTX_SWITCH(1),
	NV10_PGRAPH_CTX_SWITCH(2),
	NV10_PGRAPH_CTX_SWITCH(3),
	NV10_PGRAPH_CTX_SWITCH(4),
	NV10_PGRAPH_CTX_CACHE(0, 0),
	NV10_PGRAPH_CTX_CACHE(0, 1),
	NV10_PGRAPH_CTX_CACHE(0, 2),
	NV10_PGRAPH_CTX_CACHE(0, 3),
	NV10_PGRAPH_CTX_CACHE(0, 4),
	NV10_PGRAPH_CTX_CACHE(1, 0),
	NV10_PGRAPH_CTX_CACHE(1, 1),
	NV10_PGRAPH_CTX_CACHE(1, 2),
	NV10_PGRAPH_CTX_CACHE(1, 3),
	NV10_PGRAPH_CTX_CACHE(1, 4),
	NV10_PGRAPH_CTX_CACHE(2, 0),
	NV10_PGRAPH_CTX_CACHE(2, 1),
	NV10_PGRAPH_CTX_CACHE(2, 2),
	NV10_PGRAPH_CTX_CACHE(2, 3),
	NV10_PGRAPH_CTX_CACHE(2, 4),
	NV10_PGRAPH_CTX_CACHE(3, 0),
	NV10_PGRAPH_CTX_CACHE(3, 1),
	NV10_PGRAPH_CTX_CACHE(3, 2),
	NV10_PGRAPH_CTX_CACHE(3, 3),
	NV10_PGRAPH_CTX_CACHE(3, 4),
	NV10_PGRAPH_CTX_CACHE(4, 0),
	NV10_PGRAPH_CTX_CACHE(4, 1),
	NV10_PGRAPH_CTX_CACHE(4, 2),
	NV10_PGRAPH_CTX_CACHE(4, 3),
	NV10_PGRAPH_CTX_CACHE(4, 4),
	NV10_PGRAPH_CTX_CACHE(5, 0),
	NV10_PGRAPH_CTX_CACHE(5, 1),
	NV10_PGRAPH_CTX_CACHE(5, 2),
	NV10_PGRAPH_CTX_CACHE(5, 3),
	NV10_PGRAPH_CTX_CACHE(5, 4),
	NV10_PGRAPH_CTX_CACHE(6, 0),
	NV10_PGRAPH_CTX_CACHE(6, 1),
	NV10_PGRAPH_CTX_CACHE(6, 2),
	NV10_PGRAPH_CTX_CACHE(6, 3),
	NV10_PGRAPH_CTX_CACHE(6, 4),
	NV10_PGRAPH_CTX_CACHE(7, 0),
	NV10_PGRAPH_CTX_CACHE(7, 1),
	NV10_PGRAPH_CTX_CACHE(7, 2),
	NV10_PGRAPH_CTX_CACHE(7, 3),
	NV10_PGRAPH_CTX_CACHE(7, 4),
	NV10_PGRAPH_CTX_USER,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV10_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV10_PGRAPH_SURFACE,
	NV10_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV10_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
	0x00400904,
	0x00400908,
	0x0040090c,
	0x00400910,
	0x00400914,
	0x00400918,
	0x0040091c,
	0x00400920,
	0x00400924,
	0x00400928,
	0x0040092c,
	0x00400930,
	0x00400934,
	0x00400938,
	0x0040093c,
	0x00400940,
	0x00400944,
	0x00400948,
	0x0040094c,
	0x00400950,
	0x00400954,
	0x00400958,
	0x0040095c,
	0x00400960,
	0x00400964,
	0x00400968,
	0x0040096c,
	0x00400970,
	0x00400974,
	0x00400978,
	0x0040097c,
	0x00400980,
	0x00400984,
	0x00400988,
	0x0040098c,
	0x00400990,
	0x00400994,
	0x00400998,
	0x0040099c,
	0x004009a0,
	0x004009a4,
	0x004009a8,
	0x004009ac,
	0x004009b0,
	0x004009b4,
	0x004009b8,
	0x004009bc,
	0x004009c0,
	0x004009c4,
	0x004009c8,
	0x004009cc,
	0x004009d0,
	0x004009d4,
	0x004009d8,
	0x004009dc,
	0x004009e0,
	0x004009e4,
	0x004009e8,
	0x004009ec,
	0x004009f0,
	0x004009f4,
	0x004009f8,
	0x004009fc,
	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV03_PGRAPH_MONO_COLOR0,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	0x00400e70,
	0x00400e74,
	0x00400e78,
	0x00400e7c,
	0x00400e80,
	0x00400e84,
	0x00400e88,
	0x00400e8c,
	0x00400ea0,
	0x00400ea4,
	0x00400ea8,
	0x00400e90,
	0x00400e94,
	0x00400e98,
	0x00400e9c,
	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
	0x00400f04,
	0x00400f24,
	0x00400f08,
	0x00400f28,
	0x00400f0c,
	0x00400f2c,
	0x00400f10,
	0x00400f30,
	0x00400f14,
	0x00400f34,
	0x00400f18,
	0x00400f38,
	0x00400f1c,
	0x00400f3c,
	NV10_PGRAPH_XFMODE0,
	NV10_PGRAPH_XFMODE1,
	NV10_PGRAPH_GLOBALSTATE0,
	NV10_PGRAPH_GLOBALSTATE1,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	NV03_PGRAPH_ABS_UCLIP_XMIN,
	NV03_PGRAPH_ABS_UCLIP_XMAX,
	NV03_PGRAPH_ABS_UCLIP_YMIN,
	NV03_PGRAPH_ABS_UCLIP_YMAX,
	0x00400550,
	0x00400558,
	0x00400554,
	0x0040055c,
	NV03_PGRAPH_ABS_UCLIPA_XMIN,
	NV03_PGRAPH_ABS_UCLIPA_XMAX,
	NV03_PGRAPH_ABS_UCLIPA_YMIN,
	NV03_PGRAPH_ABS_UCLIPA_YMAX,
	NV03_PGRAPH_ABS_ICLIP_XMAX,
	NV03_PGRAPH_ABS_ICLIP_YMAX,
	NV03_PGRAPH_XY_LOGIC_MISC0,
	NV03_PGRAPH_XY_LOGIC_MISC1,
	NV03_PGRAPH_XY_LOGIC_MISC2,
	NV03_PGRAPH_XY_LOGIC_MISC3,
	NV03_PGRAPH_CLIPX_0,
	NV03_PGRAPH_CLIPX_1,
	NV03_PGRAPH_CLIPY_0,
	NV03_PGRAPH_CLIPY_1,
	NV10_PGRAPH_COMBINER0_IN_ALPHA,
	NV10_PGRAPH_COMBINER1_IN_ALPHA,
	NV10_PGRAPH_COMBINER0_IN_RGB,
	NV10_PGRAPH_COMBINER1_IN_RGB,
	NV10_PGRAPH_COMBINER_COLOR0,
	NV10_PGRAPH_COMBINER_COLOR1,
	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
	NV10_PGRAPH_COMBINER0_OUT_RGB,
	NV10_PGRAPH_COMBINER1_OUT_RGB,
	NV10_PGRAPH_COMBINER_FINAL0,
	NV10_PGRAPH_COMBINER_FINAL1,
	0x00400e00,
	0x00400e04,
	0x00400e08,
	0x00400e0c,
	0x00400e10,
	0x00400e14,
	0x00400e18,
	0x00400e1c,
	0x00400e20,
	0x00400e24,
	0x00400e28,
	0x00400e2c,
	0x00400e30,
	0x00400e34,
	0x00400e38,
	0x00400e3c,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV10_PGRAPH_DIMX_TEXTURE,
	NV10_PGRAPH_WDIMX_TEXTURE,
	NV10_PGRAPH_DVD_COLORFMT,
	NV10_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	NV03_PGRAPH_X_MISC,
	NV03_PGRAPH_Y_MISC,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
};

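/* Extra context registers that are only saved/restored on NV11+ cores with
 * chipset 0x17 or later (see the card_type/chipset checks in
 * nv10_gr_load_context() and nv10_gr_unload_context()).
 */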
static int nv17_gr_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};

#define nv10_gr(p) container_of((p), struct nv10_gr, base)

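/* chan[] is indexed by FIFO channel id; lock serialises channel creation
 * and destruction against the PGRAPH interrupt handler, which looks
 * channels up by id.
 */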
struct nv10_gr {
	struct nvkm_gr base;
	struct nv10_gr_chan *chan[32];
	spinlock_t lock;
};

#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)

struct nv10_gr_chan {
	struct nvkm_object object;
	struct nv10_gr *gr;
	int chid;
	int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
	int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
	struct pipe_state pipe_state;
	u32 lma_window[4];
};

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

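/* Note: the first argument of both macros is unused; they rely instead on a
 * "device" variable being in scope at the expansion site, which is why
 * callers get away with passing either "gr" or "device" there.
 */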
#define PIPE_SAVE(gr, state, addr)					\
	do {								\
		int __i;						\
		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);	\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
	} while (0)

#define PIPE_RESTORE(gr, state, addr)					\
	do {								\
		int __i;						\
		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);	\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
	} while (0)

static void
nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
	struct nvkm_device *device = chan->object.engine->subdev.device;
	struct nvkm_gr *gr = &chan->gr->base;
	struct pipe_state *pipe = &chan->pipe_state;
	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
	u32 xfmode0, xfmode1;
	int i;

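	/* Methods 0x1638..0x1644 supply the four LMA window parameters;
	 * buffer them and only reprogram the pipe once the last one
	 * (0x1644) has arrived.
	 */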
	chan->lma_window[(mthd - 0x1638) / 4] = data;

	if (mthd != 0x1644)
		return;

	nv04_gr_idle(gr);

	PIPE_SAVE(device, pipe_0x0040, 0x0040);
	PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);

	PIPE_RESTORE(device, chan->lma_window, 0x6790);

	nv04_gr_idle(gr);

	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);

	PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
	PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
	PIPE_SAVE(device, pipe_0x6a80, 0x6a80);

	nv04_gr_idle(gr);

	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);

	nv04_gr_idle(gr);

	PIPE_RESTORE(device, pipe_0x0040, 0x0040);

	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);

	PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
	PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
	PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
	PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv04_gr_idle(gr);
}

static void
nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
	struct nvkm_device *device = chan->object.engine->subdev.device;
	struct nvkm_gr *gr = &chan->gr->base;

	nv04_gr_idle(gr);

	nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
	nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
}

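/* The LMA methods of the celcius (0x99) class are not handled by the
 * hardware: they show up as ILLEGAL_MTHD errors and are emulated here,
 * dispatched from the interrupt handler via nv10_gr_mthd().
 */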
static bool
nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
	void (*func)(struct nv10_gr_chan *, u32, u32);
	switch (mthd) {
	case 0x1638 ... 0x1644:
		func = nv17_gr_mthd_lma_window; break;
	case 0x1658: func = nv17_gr_mthd_lma_enable; break;
	default:
		return false;
	}
	func(chan, mthd, data);
	return true;
}

static bool
nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
{
	bool (*func)(struct nv10_gr_chan *, u32, u32);
	switch (class) {
	case 0x99: func = nv17_gr_mthd_celcius; break;
	default:
		return false;
	}
	return func(chan, mthd, data);
}

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

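/* Return the channel whose context PGRAPH currently has loaded, if any.
 * Bit 16 of 0x400144 appears to flag a valid context, with the channel
 * id in the top byte of 0x400148.
 */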
static struct nv10_gr_chan *
nv10_gr_channel(struct nv10_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv10_gr_chan *chan = NULL;
	if (nvkm_rd32(device, 0x400144) & 0x00010000) {
		int chid = nvkm_rd32(device, 0x400148) >> 24;
		if (chid < ARRAY_SIZE(gr->chan))
			chan = gr->chan[chid];
	}
	return chan;
}

static void
nv10_gr_save_pipe(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = chan->gr;
	struct pipe_state *pipe = &chan->pipe_state;
	struct nvkm_device *device = gr->base.engine.subdev.device;

	PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
	PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
	PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
	PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
	PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
	PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
	PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
	PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
}

static void
nv10_gr_load_pipe(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = chan->gr;
	struct pipe_state *pipe = &chan->pipe_state;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 xfmode0, xfmode1;
	int i;

	nv04_gr_idle(&gr->base);
	/* XXX check haiku comments */
	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
	nv04_gr_idle(&gr->base);

	/* restore XFMODE */
	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
	nv04_gr_idle(&gr->base);
}

static void
nv10_gr_create_pipe(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = chan->gr;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct pipe_state *pipe_state = &chan->pipe_state;
	u32 *pipe_state_addr;
	int i;
#define PIPE_INIT(addr) \
	do { \
		pipe_state_addr = pipe_state->pipe_##addr; \
	} while (0)
#define PIPE_INIT_END(addr) \
	do { \
		u32 *__end_addr = pipe_state->pipe_##addr + \
				ARRAY_SIZE(pipe_state->pipe_##addr); \
		if (pipe_state_addr != __end_addr) \
			nvkm_error(subdev, "incomplete pipe init for 0x%x :  %p/%p\n", \
				addr, pipe_state_addr, __end_addr); \
	} while (0)
#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value

	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}

static int
nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	int i;
	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
		if (nv10_gr_ctx_regs[i] == reg)
			return i;
	}
	nvkm_error(subdev, "unknown offset nv10_ctx_regs %d\n", reg);
	return -1;
}

static int
nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	int i;
	for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
		if (nv17_gr_ctx_regs[i] == reg)
			return i;
	}
	nvkm_error(subdev, "unknown offset nv17_ctx_regs %d\n", reg);
	return -1;
}

static void
nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
{
	struct nv10_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
	u32 ctx_user, ctx_switch[5];
	int i, subchan = -1;

	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
	 * that cannot be restored via MMIO. Do it through the FIFO
	 * instead.
	 */

	/* Look for a celsius object */
	for (i = 0; i < 8; i++) {
		int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;

		if (class == 0x56 || class == 0x96 || class == 0x99) {
			subchan = i;
			break;
		}
	}

	if (subchan < 0 || !inst)
		return;

	/* Save the current ctx object */
	ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
	for (i = 0; i < 5; i++)
		ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));

	/* Save the FIFO state */
	st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
	st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
	st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
	fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);

	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);

	/* Switch to the celsius subchannel */
	for (i = 0; i < 5; i++)
		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
			nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);

	/* Inject NV10TCL_DMA_VTXBUF */
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
	nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Restore the FIFO state */
	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);

	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);

	/* Restore the current ctx object */
	for (i = 0; i < 5; i++)
		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
	nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
}

static int
nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
{
	struct nv10_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 inst;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
		nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);

	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
			nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
	}

	nv10_gr_load_pipe(chan);

	inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
	nv10_gr_load_dma_vtxbuf(chan, chid, inst);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
	return 0;
}

static int
nv10_gr_unload_context(struct nv10_gr_chan *chan)
{
	struct nv10_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
		chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);

	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
			chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
	}

	nv10_gr_save_pipe(chan);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
	return 0;
}

static void
nv10_gr_context_switch(struct nv10_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv10_gr_chan *prev = NULL;
	struct nv10_gr_chan *next = NULL;
	int chid;

	nv04_gr_idle(&gr->base);

	/* If previous context is valid, we need to save it */
	prev = nv10_gr_channel(gr);
	if (prev)
		nv10_gr_unload_context(prev);

	/* load context for next channel */
	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	next = gr->chan[chid];
	if (next)
		nv10_gr_load_context(next, chid);
}

static int
nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv10_gr_chan *chan = nv10_gr_chan(object);
	struct nv10_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (nv10_gr_channel(gr) == chan)
		nv10_gr_unload_context(chan);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

static void *
nv10_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv10_gr_chan *chan = nv10_gr_chan(object);
	struct nv10_gr *gr = chan->gr;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = NULL;
	spin_unlock_irqrestore(&gr->lock, flags);
	return chan;
}

static const struct nvkm_object_func
nv10_gr_chan = {
	.dtor = nv10_gr_chan_dtor,
	.fini = nv10_gr_chan_fini,
};

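/* Stage an initial value into the software context image; the
 * *_find_offset() helpers return -1 for registers that are not part of
 * the context, in which case the write is silently dropped.
 */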
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
	if (offset >= 0) \
		chan->nv10[offset] = val; \
	} while (0)

#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
	if (offset >= 0) \
		chan->nv17[offset] = val; \
	} while (0)

int
nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv10_gr *gr = nv10_gr(base);
	struct nv10_gr_chan *chan;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	unsigned long flags;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	NV_WRITE_CTX(0x00400e88, 0x08000000);
	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
	NV_WRITE_CTX(0x00400e10, 0x00001000);
	NV_WRITE_CTX(0x00400e14, 0x00001000);
	NV_WRITE_CTX(0x00400e30, 0x00080008);
	NV_WRITE_CTX(0x00400e34, 0x00080008);
	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
		/* is it really needed ??? */
		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
			       nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
		NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
	}
	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);

	nv10_gr_create_pipe(chan);

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = chan;
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

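/* Mirror an updated FB tiling region into PGRAPH's copy of the tile
 * configuration, with the FIFO paused and PGRAPH idled so the registers
 * are not rewritten under an in-flight job.
 */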
void
nv10_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
	struct nv10_gr *gr = nv10_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	unsigned long flags;

	nvkm_fifo_pause(fifo, &flags);
	nv04_gr_idle(&gr->base);

	nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
	nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
	nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);

	nvkm_fifo_start(fifo, &flags);
}

const struct nvkm_bitfield nv10_gr_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
	{}
};

const struct nvkm_bitfield nv10_gr_nstatus[] = {
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};

void
nv10_gr_intr(struct nvkm_gr *base)
{
	struct nv10_gr *gr = nv10_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x01f00000) >> 20;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	struct nv10_gr_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	chan = gr->chan[chid];

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			if (!nv10_gr_mthd(chan, class, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv10_gr_context_switch(gr);
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   chan ? chan->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->lock, flags);
}

int
nv10_gr_init(struct nvkm_gr *base)
{
	struct nv10_gr *gr = nv10_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));

	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
		nvkm_wr32(device, 0x400838, 0x002f8684);
		nvkm_wr32(device, 0x40083c, 0x00115f3f);
		nvkm_wr32(device, 0x4006b0, 0x40000020);
	} else {
		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
	}

	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
	nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);

	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
	return 0;
}

int
nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	struct nv10_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	spin_lock_init(&gr->lock);
	*pgr = &gr->base;

	return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}

static const struct nvkm_gr_func
nv10_gr = {
	.init = nv10_gr_init,
	.intr = nv10_gr_intr,
	.tile = nv10_gr_tile,
	.chan_new = nv10_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
		{ -1, -1, 0x0056, &nv04_gr_object }, /* celcius */
		{}
	}
};

int
nv10_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return nv10_gr_new_(&nv10_gr, device, type, inst, pgr);
}