/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <subdev/secboot.h>

#include <nvif/class.h>

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
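
/*
 * One-time PGRAPH initialisation for GM200-series (second-generation
 * Maxwell) GPUs; the register layout follows the earlier gf100 family.
 */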
int
gm200_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {}, tmp;
	u8  tpcnr[GPC_MAX];
	int gpc, tpc, ppc, rop;
	int i;
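
	/*
	 * Mirror part of the memory controller (PFB) configuration into
	 * PGRAPH and program the addresses of the two buffers of (so far
	 * unknown) purpose allocated by the common gf100 code; addresses
	 * are written in 256-byte units, hence the >> 8.
	 */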
	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);
	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);

	/*XXX: belongs in fb */
	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);

	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);

	gm107_gr_init_bios(gr);

	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
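
	/*
	 * Build the GPC/TPC mapping table: walk the GPCs round-robin,
	 * assigning each global TPC index a GPC-local TPC number, packed
	 * four bits per entry, then broadcast the table to all GPCs.
	 */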
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
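
	/*
	 * Per-GPC configuration: TPC counts and a fixed-point constant
	 * derived from the total TPC count (0x00800000 / tpc_total,
	 * rounded up).
	 */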
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
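
	/*
	 * Global PGRAPH setup; the 0xc0000000 writes appear to enable
	 * exception/error reporting in the individual units, matching the
	 * earlier gf100-family init sequences.
	 */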
	nvkm_wr32(device, 0x400500, 0x00010001);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400124, 0x00000002);
	nvkm_wr32(device, 0x409c24, 0x000e0000);
	nvkm_wr32(device, 0x405848, 0xc0000000);
	nvkm_wr32(device, 0x40584c, 0x00000001);
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);
	nvkm_wr32(device, 0x408030, 0xc0000000);
	nvkm_wr32(device, 0x404490, 0xc0000000);
	nvkm_wr32(device, 0x406018, 0xc0000000);
	nvkm_wr32(device, 0x407020, 0x40000000);
	nvkm_wr32(device, 0x405840, 0xc0000000);
	nvkm_wr32(device, 0x405844, 0x00ffffff);
	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
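
	/* Per-GPC, per-PPC and per-TPC exception/interrupt setup. */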
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
		}
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
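
	/* Per-ROP exception/interrupt setup. */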
	for (rop = 0; rop < gr->rop_nr; rop++) {
		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
	}
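
	/*
	 * Write all-ones to the remaining trap/interrupt registers; these
	 * appear to be enable and/or write-to-clear status registers, as
	 * on earlier chips in this family.
	 */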
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	nvkm_wr32(device, 0x400054, 0x2c350f63);
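
	/*
	 * Program the zero-bandwidth-clear (ZBC) tables, then load and
	 * start the context-switching firmware (FECS/GPCCS).
	 */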
	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}
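
/*
 * Common constructor for GM200-series graphics engines.  On these GPUs the
 * FECS/GPCCS falcons may be managed by the secure boot (secboot) subdev, in
 * which case their firmware images are not loaded from external files here.
 */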
int
gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct nvkm_gr **pgr)
{
	struct gf100_gr *gr;
	int ret;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;

	ret = gf100_gr_ctor(func, device, index, gr);
	if (ret)
		return ret;

	/* Load firmware for falcons that aren't managed by secure boot. */
	if (!nvkm_secboot_is_managed(device->secboot,
				     NVKM_SECBOOT_FALCON_FECS)) {
		if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
		    (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
			return ret;
	}
	if (!nvkm_secboot_is_managed(device->secboot,
				     NVKM_SECBOOT_FALCON_GPCCS)) {
		if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
		    (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
			return ret;
	}
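
	/* Register/method init lists provided as external firmware files. */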
	if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
	    (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
	    (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
	    (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
		return ret;

	return 0;
}

static const struct gf100_gr_func
gm200_gr = {
	.init = gm200_gr_init,
	.ppc_nr = 2,
	.grctx = &gm200_grctx,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gm200_gr_new_(&gm200_gr, device, index, pgr);
}