#include "nv20.h"
#include "regs.h"

#include <core/gpuobj.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

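/* Per-channel PGRAPH context object: NV25 reuses the NV20 init/fini/dtor
 * hooks and differs only in the context image written by nv25_gr_chan_new().
 */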
static const struct nvkm_object_func
nv25_gr_chan = {
	.dtor = nv20_gr_chan_dtor,
	.init = nv20_gr_chan_init,
	.fini = nv20_gr_chan_fini,
};

static int
nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv20_gr *gr = nv20_gr(base);
	struct nv20_gr_chan *chan;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

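	/* Allocate the per-channel PGRAPH context image in instance memory. */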
	ret = nvkm_memory_new(gr->base.engine.subdev.device,
			      NVKM_MEM_TARGET_INST, 0x3724, 16, true,
			      &chan->inst);
	if (ret)
		return ret;

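	/* Write the initial (default) context values into the image. */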
	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
	nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
	nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x049c, 0x00000101);
	nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
	nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
	nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
	nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
	nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
	nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
	for (i = 0x0510; i <= 0x051c; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	for (i = 0x0530; i <= 0x053c; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080000);
	for (i = 0x0548; i <= 0x0554; i += 4)
		nvkm_wo32(chan->inst, i, 0x01012000);
	for (i = 0x0558; i <= 0x0564; i += 4)
		nvkm_wo32(chan->inst, i, 0x000105b8);
	for (i = 0x0568; i <= 0x0574; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080008);
	for (i = 0x0598; i <= 0x05d4; i += 4)
		nvkm_wo32(chan->inst, i, 0x07ff0000);
	nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
	nvkm_wo32(chan->inst, 0x0620, 0x00000080);
	nvkm_wo32(chan->inst, 0x0624, 0x30201000);
	nvkm_wo32(chan->inst, 0x0628, 0x70605040);
	nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
	nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
	nvkm_wo32(chan->inst, 0x0664, 0x00000001);
	nvkm_wo32(chan->inst, 0x066c, 0x00004000);
	nvkm_wo32(chan->inst, 0x0678, 0x00000001);
	nvkm_wo32(chan->inst, 0x0680, 0x00040000);
	nvkm_wo32(chan->inst, 0x0684, 0x00010000);
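	/* This range takes a repeating three-word group per 16-byte stride. */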
	for (i = 0x1b04; i <= 0x2374; i += 16) {
		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
	}
	nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2744, 0x40000000);
	nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
	nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
	nvkm_wo32(chan->inst, 0x2754, 0x40000000);
	nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
	nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
	nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
	nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
	nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
	for (i = 0x3484; i <= 0x34a0; i += 4)
		nvkm_wo32(chan->inst, i, 0x001c527c);
	nvkm_done(chan->inst);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

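/* NV25 PGRAPH engine: the core engine hooks come from the NV20 implementation;
 * only the channel context constructor and the exposed object classes are
 * defined here.
 */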
static const struct nvkm_gr_func
nv25_gr = {
	.dtor = nv20_gr_dtor,
	.oneinit = nv20_gr_oneinit,
	.init = nv20_gr_init,
	.intr = nv20_gr_intr,
	.tile = nv20_gr_tile,
	.chan_new = nv25_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
		{ -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
		{}
	}
};

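/* Engine constructor; defers to the common NV20 constructor with NV25 hooks. */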
int
nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv20_gr_new_(&nv25_gr, device, index, pgr);
}