/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <core/firmware.h>
#include <subdev/acr.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>

#include <nvfw/flcn.h>

#include <nvif/class.h>

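/* Load the clear colour for ZBC table entry 'zbc': select the entry via
 * 0x41bcb4 and write out its four L2 colour components.  Entries without
 * a format set are programmed with all-zeroes.
 */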
static void
ga102_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 invalid[] = { 0, 0, 0, 0 }, *color;

	if (gr->zbc_color[zbc].format)
		color = gr->zbc_color[zbc].l2;
	else
		color = invalid;

	nvkm_mask(device, 0x41bcb4, 0x0000001f, zbc);
	nvkm_wr32(device, 0x41bcec, color[0]);
	nvkm_wr32(device, 0x41bcf0, color[1]);
	nvkm_wr32(device, 0x41bcf4, color[2]);
	nvkm_wr32(device, 0x41bcf8, color[3]);
}

static const struct gf100_gr_func_zbc
ga102_gr_zbc = {
	.clear_color = ga102_gr_zbc_clear_color,
	.clear_depth = gp100_gr_zbc_clear_depth,
	.stencil_get = gp102_gr_zbc_stencil_get,
	.clear_stencil = gp102_gr_zbc_clear_stencil,
};

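/* Reset the GPCCS falcon by dropping and re-asserting 0x41a610, with a
 * short delay in between.
 */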
static void
ga102_gr_gpccs_reset(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x41a610, 0x00000000);
	nvkm_msec(device, 1, NVKM_DELAY);
	nvkm_wr32(device, 0x41a610, 0x00000001);
}

static const struct nvkm_acr_lsf_func
ga102_gr_gpccs_acr = {
	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
	.bl_entry = 0x3400,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp108_gr_acr_bld_write,
	.bld_patch = gp108_gr_acr_bld_patch,
};

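/* Falcon reset sequence used for the FECS reset hook: both the FECS
 * (0x409614) and GPC (0x41a614) control registers are poked with short
 * delays in between, then read back to ensure the writes have posted.
 */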
static void
ga102_gr_fecs_reset(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x409614, 0x00000010);
	nvkm_wr32(device, 0x41a614, 0x00000020);
	nvkm_usec(device, 10, NVKM_DELAY);
	nvkm_wr32(device, 0x409614, 0x00000110);
	nvkm_wr32(device, 0x41a614, 0x00000a20);
	nvkm_usec(device, 10, NVKM_DELAY);
	nvkm_rd32(device, 0x409614);
	nvkm_rd32(device, 0x41a614);
}

static const struct nvkm_acr_lsf_func
ga102_gr_fecs_acr = {
	.bl_entry = 0x7e00,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp108_gr_acr_bld_write,
	.bld_patch = gp108_gr_acr_bld_patch,
};

static void
ga102_gr_init_rop_exceptions(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x41bcbc, 0x40000000);
	nvkm_wr32(device, 0x41bc38, 0x40000000);
	nvkm_wr32(device, 0x41ac94, nvkm_rd32(device, 0x502c94));
}

static void
ga102_gr_init_40a790(struct gf100_gr *gr)
{
	nvkm_wr32(gr->base.engine.subdev.device, 0x40a790, 0xc0000000);
}

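/* Mirror the relevant framebuffer/MMU configuration (0x100c80, 0x100cc4,
 * 0x100cc8, 0x100ccc) into the GPC MMU registers.
 */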
static void
ga102_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}

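/* GR interrupts on GA102 are routed through the VFN interrupt controller;
 * the vector to use is read from 0x400154.
 */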
static struct nvkm_intr *
ga102_gr_oneinit_intr(struct gf100_gr *gr, enum nvkm_intr_type *pvector)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	*pvector = nvkm_rd32(device, 0x400154) & 0x00000fff;
	return &device->vfn->intr;
}

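/* GA10x GR implementation.  Most hooks are inherited from earlier
 * generations (GM200/GK104/GP10x/GV100/TU102); the GA102-specific pieces
 * are the GPC MMU setup, falcon resets, ROP exception enables, interrupt
 * routing and ZBC handling above.
 */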
static const struct gf100_gr_func
ga102_gr = {
	.oneinit_intr = ga102_gr_oneinit_intr,
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gv100_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_419bd8 = gv100_gr_init_419bd8,
	.init_gpc_mmu = ga102_gr_init_gpc_mmu,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = tu102_gr_init_zcull,
	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
	.init_fs = tu102_gr_init_fs,
	.init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
	.init_40a790 = ga102_gr_init_40a790,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_504430 = gv100_gr_init_504430,
	.init_shader_exceptions = gv100_gr_init_shader_exceptions,
	.init_rop_exceptions = ga102_gr_init_rop_exceptions,
	.init_4188a4 = gv100_gr_init_4188a4,
	.trap_mp = gv100_gr_trap_mp,
	.fecs.reset = ga102_gr_fecs_reset,
	.gpccs.reset = ga102_gr_gpccs_reset,
	.rops = gm200_gr_rops,
	.gpc_nr = 7,
	.tpc_nr = 6,
	.ppc_nr = 3,
	.grctx = &ga102_grctx,
	.zbc = &ga102_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, AMPERE_B, &gf100_fermi },
		{ -1, -1, AMPERE_COMPUTE_B },
		{}
	}
};

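/* Firmware images required for each supported GA10x variant. */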
MODULE_FIRMWARE("nvidia/ga102/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga103/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga104/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga106/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga107/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/NET_img.bin");

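/* Layout of the NET_img ("netlist") firmware image: a header giving the
 * netlist version and region count, followed by an array of region
 * descriptors whose offsets point at the region data elsewhere in the
 * image.
 */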
struct netlist_region {
	u32 region_id;
	u32 data_size;
	u32 data_offset;
};

struct netlist_image_header {
	u32 version;
	u32 regions;
};

struct netlist_image {
	struct netlist_image_header header;
	struct netlist_region regions[];
};

struct netlist_av64 {
	u32 addr;
	u32 data_hi;
	u32 data_lo;
};

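/* Convert a netlist region of address/value entries with 64-bit data
 * (split into high and low words) into a gf100_gr_pack list of 64-bit
 * init entries.
 */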
static int
ga102_gr_av64_to_init(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
{
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	int nent;
	int i;

	nent = (blob->size / sizeof(struct netlist_av64));
	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
	if (!pack)
		return -ENOMEM;

	init = (void *)(pack + 2);
	pack[0].init = init;
	pack[0].type = 64;

	for (i = 0; i < nent; i++) {
		struct gf100_gr_init *ent = &init[i];
		struct netlist_av64 *av = &((struct netlist_av64 *)blob->data)[i];

		ent->addr = av->addr;
		ent->data = ((u64)av->data_hi << 32) | av->data_lo;
		ent->count = 1;
		ent->pitch = 1;
	}

	*ppack = pack;
	return 0;
}

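/* Parse the NET_img firmware: locate the FECS/GPCCS instruction and data
 * regions, translate the various init/method regions into register and
 * method lists, and register the FECS/GPCCS LS firmware with the ACR.
 */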
static int
ga102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	const struct firmware *fw;
	const struct netlist_image *net;
	const struct netlist_region *fecs_inst = NULL;
	const struct netlist_region *fecs_data = NULL;
	const struct netlist_region *gpccs_inst = NULL;
	const struct netlist_region *gpccs_data = NULL;
	int ret, i;

	ret = nvkm_firmware_get(subdev, "gr/NET_img", 0, &fw);
	if (ret)
		return ret;

	net = (const void *)fw->data;
	nvkm_debug(subdev, "netlist version %d, %d regions\n",
		   net->header.version, net->header.regions);

	for (i = 0; i < net->header.regions; i++) {
		const struct netlist_region *reg = &net->regions[i];
		struct nvkm_blob blob = {
			.data = (void *)fw->data + reg->data_offset,
			.size = reg->data_size,
		};

		nvkm_debug(subdev, "\t%2d: %08x %08x\n",
			   reg->region_id, reg->data_offset, reg->data_size);

		switch (reg->region_id) {
		case  0: fecs_data = reg; break;
		case  1: fecs_inst = reg; break;
		case  2: gpccs_data = reg; break;
		case  3: gpccs_inst = reg; break;
		case  4: gk20a_gr_av_to_init(&blob, &gr->bundle); break;
		case  5: gk20a_gr_aiv_to_init(&blob, &gr->sw_ctx); break;
		case  7: gk20a_gr_av_to_method(&blob, &gr->method); break;
		case 28: tu102_gr_av_to_init_veid(&blob, &gr->bundle_veid); break;
		case 34: ga102_gr_av64_to_init(&blob, &gr->bundle64); break;
		case 48: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx1); break;
		case 49: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx2); break;
		case 50: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx3); break;
		case 51: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx4); break;
		default:
			break;
		}
	}

	ret = nvkm_acr_lsfw_load_bl_sig_net(subdev, &gr->fecs.falcon, NVKM_ACR_LSF_FECS,
					    "gr/fecs_", ver, fwif->fecs,
					    fw->data + fecs_inst->data_offset,
						       fecs_inst->data_size,
					    fw->data + fecs_data->data_offset,
						       fecs_data->data_size);
	if (ret)
		goto done;

	ret = nvkm_acr_lsfw_load_bl_sig_net(subdev, &gr->gpccs.falcon, NVKM_ACR_LSF_GPCCS,
					    "gr/gpccs_", ver, fwif->gpccs,
					    fw->data + gpccs_inst->data_offset,
						       gpccs_inst->data_size,
					    fw->data + gpccs_data->data_offset,
						       gpccs_data->data_size);
	if (ret)
		goto done;

	gr->firmware = true;

done:
	/* Drop our reference to the netlist image on both success and error paths. */
	nvkm_firmware_put(fw);
	return ret;
}

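/* Firmware interface table: the signed-firmware path via ga102_gr_load(),
 * with gm200_gr_nofw() handling the case where no firmware is available.
 */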
static const struct gf100_gr_fwif
ga102_gr_fwif[] = {
	{  0, ga102_gr_load, &ga102_gr, &ga102_gr_fecs_acr, &ga102_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};

int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}