1 /*
2 * Copyright 2019 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #include "gf100.h"
23 #include "ctxgf100.h"
24
25 #include <core/firmware.h>
26 #include <subdev/acr.h>
27 #include <subdev/timer.h>
28 #include <subdev/vfn.h>
29
30 #include <nvfw/flcn.h>
31
32 #include <nvif/class.h>
33
34 static void
ga102_gr_zbc_clear_color(struct gf100_gr * gr,int zbc)35 ga102_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
36 {
37 struct nvkm_device *device = gr->base.engine.subdev.device;
38 u32 invalid[] = { 0, 0, 0, 0 }, *color;
39
40 if (gr->zbc_color[zbc].format)
41 color = gr->zbc_color[zbc].l2;
42 else
43 color = invalid;
44
45 nvkm_mask(device, 0x41bcb4, 0x0000001f, zbc);
46 nvkm_wr32(device, 0x41bcec, color[0]);
47 nvkm_wr32(device, 0x41bcf0, color[1]);
48 nvkm_wr32(device, 0x41bcf4, color[2]);
49 nvkm_wr32(device, 0x41bcf8, color[3]);
50 }
51
/* ZBC (zero-bandwidth clear) table handling; GA102 needs its own colour
 * programming (see ga102_gr_zbc_clear_color), the rest is inherited from
 * earlier generations.
 */
static const struct gf100_gr_func_zbc
ga102_gr_zbc = {
	.clear_color = ga102_gr_zbc_clear_color,
	.clear_depth = gp100_gr_zbc_clear_depth,
	.stencil_get = gp102_gr_zbc_stencil_get,
	.clear_stencil = gp102_gr_zbc_clear_stencil,
};
59
/* Pulse the GPCCS falcon engine reset: assert (write 0), hold briefly,
 * then de-assert (write 1).  Register semantics are undocumented;
 * the sequence and delay are taken as-is from NVIDIA-provided init —
 * do not reorder.
 */
static void
ga102_gr_gpccs_reset(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x41a610, 0x00000000);
	nvkm_msec(device, 1, NVKM_DELAY);
	nvkm_wr32(device, 0x41a610, 0x00000001);
}
69
/* ACR (secure boot) parameters for loading the GPCCS falcon ucode.
 * GPCCS requires the ACR to force a privileged load; bootloader entry
 * point and descriptor layout match the gp108 format.
 */
static const struct nvkm_acr_lsf_func
ga102_gr_gpccs_acr = {
	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
	.bl_entry = 0x3400,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp108_gr_acr_bld_write,
	.bld_patch = gp108_gr_acr_bld_patch,
};
78
/* Reset sequence covering both the FECS (0x4096xx) and GPCCS (0x41a6xx)
 * falcons.  The paired writes, delays, and trailing read-backs (which
 * flush/posted-write fence the bus) mirror NVIDIA init traces; the exact
 * bit meanings are undocumented — do not reorder or drop the reads.
 */
static void
ga102_gr_fecs_reset(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x409614, 0x00000010);
	nvkm_wr32(device, 0x41a614, 0x00000020);
	nvkm_usec(device, 10, NVKM_DELAY);
	nvkm_wr32(device, 0x409614, 0x00000110);
	nvkm_wr32(device, 0x41a614, 0x00000a20);
	nvkm_usec(device, 10, NVKM_DELAY);
	nvkm_rd32(device, 0x409614);
	nvkm_rd32(device, 0x41a614);
}
93
/* ACR (secure boot) parameters for loading the FECS falcon ucode;
 * descriptor handling is shared with gp108, only the bootloader entry
 * point differs.
 */
static const struct nvkm_acr_lsf_func
ga102_gr_fecs_acr = {
	.bl_entry = 0x7e00,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp108_gr_acr_bld_write,
	.bld_patch = gp108_gr_acr_bld_patch,
};
101
/* Enable ROP exception reporting.  Register meanings are undocumented;
 * values come from NVIDIA init traces.  NOTE(review): the final write
 * copies 0x502c94 into 0x41ac94 — presumably propagating a per-GPC
 * setting to the broadcast range; confirm against vendor traces.
 */
static void
ga102_gr_init_rop_exceptions(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x41bcbc, 0x40000000);
	nvkm_wr32(device, 0x41bc38, 0x40000000);
	nvkm_wr32(device, 0x41ac94, nvkm_rd32(device, 0x502c94));
}
111
112 static void
ga102_gr_init_40a790(struct gf100_gr * gr)113 ga102_gr_init_40a790(struct gf100_gr *gr)
114 {
115 nvkm_wr32(gr->base.engine.subdev.device, 0x40a790, 0xc0000000);
116 }
117
/* Program the GPC MMU by mirroring the corresponding FB/MMU (0x100cxx)
 * configuration into the graphics engine's 0x4188xx registers.  Exact
 * bit semantics are undocumented; the mask and register pairing follow
 * the pattern used on earlier generations.
 */
static void
ga102_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}
130
/* One-time interrupt setup: read the engine's stall interrupt vector
 * (low 12 bits of 0x400154) and route it through the VFN interrupt
 * controller rather than the legacy per-engine mechanism.
 */
static struct nvkm_intr *
ga102_gr_oneinit_intr(struct gf100_gr *gr, enum nvkm_intr_type *pvector)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	*pvector = nvkm_rd32(device, 0x400154) & 0x00000fff;
	return &device->vfn->intr;
}
139
140 static int
ga102_gr_nonstall(struct gf100_gr * gr)141 ga102_gr_nonstall(struct gf100_gr *gr)
142 {
143 return nvkm_rd32(gr->base.engine.subdev.device, 0x400160) & 0x00000fff;
144 }
145
/* GA102 graphics engine description.  Most hooks are inherited from
 * earlier generations; GA102-specific entries (intr routing, GPC MMU,
 * falcon resets, ZBC colour, ROP exceptions) are defined above.
 * gpc/tpc/ppc counts are the maximums for the GA10x family; the
 * actual configuration is read from fuses at init time.
 */
static const struct gf100_gr_func
ga102_gr = {
	.nonstall = ga102_gr_nonstall,
	.oneinit_intr = ga102_gr_oneinit_intr,
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gv100_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_419bd8 = gv100_gr_init_419bd8,
	.init_gpc_mmu = ga102_gr_init_gpc_mmu,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = tu102_gr_init_zcull,
	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
	.init_fs = tu102_gr_init_fs,
	.init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
	.init_40a790 = ga102_gr_init_40a790,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_504430 = gv100_gr_init_504430,
	.init_shader_exceptions = gv100_gr_init_shader_exceptions,
	.init_rop_exceptions = ga102_gr_init_rop_exceptions,
	.init_4188a4 = gv100_gr_init_4188a4,
	.trap_mp = gv100_gr_trap_mp,
	.fecs.reset = ga102_gr_fecs_reset,
	.gpccs.reset = ga102_gr_gpccs_reset,
	.rops = gm200_gr_rops,
	.gpc_nr = 7,
	.tpc_nr = 6,
	.ppc_nr = 3,
	.grctx = &ga102_grctx,
	.zbc = &ga102_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, AMPERE_B, &gf100_fermi },
		{ -1, -1, AMPERE_COMPUTE_B },
		{}
	}
};
186
/* Firmware files required per GA10x chipset: falcon bootloaders,
 * signatures, and the netlist image parsed by ga102_gr_load() below.
 */
MODULE_FIRMWARE("nvidia/ga102/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga102/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga103/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga103/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga104/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga104/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga106/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga106/gr/NET_img.bin");

MODULE_FIRMWARE("nvidia/ga107/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/ga107/gr/NET_img.bin");
216
/* On-disk layout of the NET_img firmware image: a header followed by a
 * region directory; each region's payload lives at data_offset bytes
 * from the start of the file.  Fields are read directly from the
 * firmware blob — presumably little-endian, as on all supported GPUs.
 */
struct netlist_region {
	u32 region_id;		/* see the switch in ga102_gr_load() */
	u32 data_size;		/* payload size in bytes */
	u32 data_offset;	/* payload offset from start of image */
};

struct netlist_image_header {
	u32 version;
	u32 regions;		/* number of entries in regions[] */
};

struct netlist_image {
	struct netlist_image_header header;
	struct netlist_region regions[];
};

/* Address/64-bit-value record used by region 34 (bundle64 init list). */
struct netlist_av64 {
	u32 addr;
	u32 data_hi;		/* upper 32 bits of the 64-bit value */
	u32 data_lo;		/* lower 32 bits */
};
238
239 static int
ga102_gr_av64_to_init(struct nvkm_blob * blob,struct gf100_gr_pack ** ppack)240 ga102_gr_av64_to_init(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
241 {
242 struct gf100_gr_init *init;
243 struct gf100_gr_pack *pack;
244 int nent;
245 int i;
246
247 nent = (blob->size / sizeof(struct netlist_av64));
248 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
249 if (!pack)
250 return -ENOMEM;
251
252 init = (void *)(pack + 2);
253 pack[0].init = init;
254 pack[0].type = 64;
255
256 for (i = 0; i < nent; i++) {
257 struct gf100_gr_init *ent = &init[i];
258 struct netlist_av64 *av = &((struct netlist_av64 *)blob->data)[i];
259
260 ent->addr = av->addr;
261 ent->data = ((u64)av->data_hi << 32) | av->data_lo;
262 ent->count = 1;
263 ent->pitch = 1;
264 }
265
266 *ppack = pack;
267 return 0;
268 }
269
270 static int
ga102_gr_load(struct gf100_gr * gr,int ver,const struct gf100_gr_fwif * fwif)271 ga102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
272 {
273 struct nvkm_subdev *subdev = &gr->base.engine.subdev;
274 const struct firmware *fw;
275 const struct netlist_image *net;
276 const struct netlist_region *fecs_inst = NULL;
277 const struct netlist_region *fecs_data = NULL;
278 const struct netlist_region *gpccs_inst = NULL;
279 const struct netlist_region *gpccs_data = NULL;
280 int ret, i;
281
282 ret = nvkm_firmware_get(subdev, "gr/NET_img", 0, &fw);
283 if (ret)
284 return ret;
285
286 net = (const void *)fw->data;
287 nvkm_debug(subdev, "netlist version %d, %d regions\n",
288 net->header.version, net->header.regions);
289
290 for (i = 0; i < net->header.regions; i++) {
291 const struct netlist_region *reg = &net->regions[i];
292 struct nvkm_blob blob = {
293 .data = (void *)fw->data + reg->data_offset,
294 .size = reg->data_size,
295 };
296
297 nvkm_debug(subdev, "\t%2d: %08x %08x\n",
298 reg->region_id, reg->data_offset, reg->data_size);
299
300 switch (reg->region_id) {
301 case 0: fecs_data = reg; break;
302 case 1: fecs_inst = reg; break;
303 case 2: gpccs_data = reg; break;
304 case 3: gpccs_inst = reg; break;
305 case 4: gk20a_gr_av_to_init(&blob, &gr->bundle); break;
306 case 5: gk20a_gr_aiv_to_init(&blob, &gr->sw_ctx); break;
307 case 7: gk20a_gr_av_to_method(&blob, &gr->method); break;
308 case 28: tu102_gr_av_to_init_veid(&blob, &gr->bundle_veid); break;
309 case 34: ga102_gr_av64_to_init(&blob, &gr->bundle64); break;
310 case 48: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx1); break;
311 case 49: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx2); break;
312 case 50: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx3); break;
313 case 51: gk20a_gr_av_to_init(&blob, &gr->sw_nonctx4); break;
314 default:
315 break;
316 }
317 }
318
319 ret = nvkm_acr_lsfw_load_bl_sig_net(subdev, &gr->fecs.falcon, NVKM_ACR_LSF_FECS,
320 "gr/fecs_", ver, fwif->fecs,
321 fw->data + fecs_inst->data_offset,
322 fecs_inst->data_size,
323 fw->data + fecs_data->data_offset,
324 fecs_data->data_size);
325 if (ret)
326 return ret;
327
328 ret = nvkm_acr_lsfw_load_bl_sig_net(subdev, &gr->gpccs.falcon, NVKM_ACR_LSF_GPCCS,
329 "gr/gpccs_", ver, fwif->gpccs,
330 fw->data + gpccs_inst->data_offset,
331 gpccs_inst->data_size,
332 fw->data + gpccs_data->data_offset,
333 gpccs_data->data_size);
334 if (ret)
335 return ret;
336
337 gr->firmware = true;
338
339 nvkm_firmware_put(fw);
340 return 0;
341 }
342
/* Firmware interface table: prefer the signed-firmware path (version 0),
 * falling back to the no-firmware stub if loading fails.
 */
static const struct gf100_gr_fwif
ga102_gr_fwif[] = {
	{  0, ga102_gr_load, &ga102_gr, &ga102_gr_fecs_acr, &ga102_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};
349
/* Engine constructor entry point, wired into the GA10x device tables. */
int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}
355