/*
 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <subdev/timer.h>

#include <nvif/class.h>

/* One record of an "av" (address/value) firmware image. */
struct gk20a_fw_av
{
	u32 addr;
	u32 data;
};

/*
 * gk20a_gr_av_to_init - load an "av" firmware image and convert it into a
 * gf100_gr_pack/gf100_gr_init table (one init entry per av record, each with
 * count/pitch of 1).
 *
 * On success, *ppack points at a single vzalloc()ed buffer holding both the
 * pack array and the init entries; the caller owns it and frees it with
 * vfree(). Returns 0 on success or a negative errno (firmware load failure
 * or -ENOMEM).
 */
int
gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
		    struct gf100_gr_pack **ppack)
{
	struct gf100_gr_fuc fuc;
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	int nent;
	int ret;
	int i;

	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
	if (ret)
		return ret;

	nent = (fuc.size / sizeof(struct gk20a_fw_av));

	/*
	 * One allocation for everything: 2 packs (entry + zeroed terminator)
	 * followed by nent+1 inits (the extra one is the zeroed terminator;
	 * vzalloc provides the zeroing).
	 */
	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
	if (!pack) {
		ret = -ENOMEM;
		goto end;
	}

	/* init entries live immediately after the two packs */
	init = (void *)(pack + 2);
	pack[0].init = init;

	for (i = 0; i < nent; i++) {
		struct gf100_gr_init *ent = &init[i];
		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];

		ent->addr = av->addr;
		ent->data = av->data;
		ent->count = 1;
		ent->pitch = 1;
	}

	*ppack = pack;

end:
	/* Firmware blob is copied into the pack; release it on all paths. */
	gf100_gr_dtor_fw(&fuc);
	return ret;
}

/* One record of an "aiv" (address/index/value) firmware image. */
struct gk20a_fw_aiv
{
	u32 addr;
	u32 index;
	u32 data;
};

/*
 * gk20a_gr_aiv_to_init - load an "aiv" firmware image and convert it into a
 * gf100_gr_pack/gf100_gr_init table.
 *
 * Same allocation/ownership contract as gk20a_gr_av_to_init(): *ppack is one
 * vzalloc()ed buffer freed by the caller with vfree(). Note that only the
 * addr/data fields of each record are used; the index field is ignored here.
 */
int
gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
		     struct gf100_gr_pack **ppack)
{
	struct gf100_gr_fuc fuc;
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	int nent;
	int ret;
	int i;

	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
	if (ret)
		return ret;

	nent = (fuc.size / sizeof(struct gk20a_fw_aiv));

	/* 2 packs + nent inits + zeroed terminator, as in av_to_init(). */
	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
	if (!pack) {
		ret = -ENOMEM;
		goto end;
	}

	init = (void *)(pack + 2);
	pack[0].init = init;

	for (i = 0; i < nent; i++) {
		struct gf100_gr_init *ent = &init[i];
		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];

		ent->addr = av->addr;
		ent->data = av->data;
		ent->count = 1;
		ent->pitch = 1;
	}

	*ppack = pack;

end:
	gf100_gr_dtor_fw(&fuc);
	return ret;
}

/*
 * gk20a_gr_av_to_method - load an "av" firmware image whose records encode
 * (class, method) pairs and convert it into per-class packs.
 *
 * Each record packs the object class in the low 16 bits of addr and the
 * method offset in the high 16 bits (shifted right by 14, i.e. the method
 * word index times 4). A new pack entry is started every time the class
 * changes; at most max_classes distinct runs are supported, beyond which
 * -ENOSPC is returned. *ppack follows the same vzalloc()/vfree() ownership
 * contract as the converters above.
 */
int
gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
		      struct gf100_gr_pack **ppack)
{
	struct gf100_gr_fuc fuc;
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	/* We don't suppose we will initialize more than 16 classes here... */
	static const unsigned int max_classes = 16;
	u32 classidx = 0, prevclass = 0;
	int nent;
	int ret;
	int i;

	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
	if (ret)
		return ret;

	nent = (fuc.size / sizeof(struct gk20a_fw_av));

	/* max_classes packs (any unused ones stay zeroed as terminators)
	 * followed by nent+1 inits (final one is the zeroed terminator). */
	pack = vzalloc((sizeof(*pack) * max_classes) +
		       (sizeof(*init) * (nent + 1)));
	if (!pack) {
		ret = -ENOMEM;
		goto end;
	}

	init = (void *)(pack + max_classes);

	for (i = 0; i < nent; i++) {
		struct gf100_gr_init *ent = &init[i];
		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
		u32 class = av->addr & 0xffff;
		u32 addr = (av->addr & 0xffff0000) >> 14;

		if (prevclass != class) {
			/* class changed: open a new pack for this run */
			pack[classidx].init = ent;
			pack[classidx].type = class;
			prevclass = class;
			if (++classidx >= max_classes) {
				vfree(pack);
				ret = -ENOSPC;
				goto end;
			}
		}

		ent->addr = addr;
		ent->data = av->data;
		ent->count = 1;
		ent->pitch = 1;
	}

	*ppack = pack;

end:
	gf100_gr_dtor_fw(&fuc);
	return ret;
}

/*
 * Wait (up to 2ms each) for the FECS and GPCCS falcons to finish scrubbing
 * their memories, as signalled by bits 1-2 of the 0x40910c / 0x41a10c
 * registers clearing. Returns 0 on success or -ETIMEDOUT.
 */
static int
gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Program the SM hardware-warning (HWW) ESR report masks.
 * NOTE(review): the specific bit meanings of 0x419e44/0x419e4c are not
 * visible here — the values are taken as fixed masks for gk20a; confirm
 * against hardware documentation before changing.
 */
static void
gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x419e44, 0x1ffffe);
	nvkm_wr32(device, 0x419e4c, 0x7f);
}

/*
 * gk20a_gr_init - bring up the GR engine: clear SCC RAM, replay the
 * non-context sw state, wait for falcon memory scrubbing, program zcull
 * TPC mapping and per-GPC state, enable interrupts/exceptions, then hand
 * off to the context-control firmware.
 *
 * Returns 0 on success or a negative errno from any of the wait/ctxctl
 * steps. The order of register writes follows the hardware init sequence
 * and should not be rearranged.
 */
int
gk20a_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	/* 0x00800000 / tpc_total, rounded up — written per-GPC and broadcast */
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8 tpcnr[GPC_MAX];
	int gpc, tpc;
	int ret, i;

	/* Clear SCC RAM */
	nvkm_wr32(device, 0x40802c, 0x1);

	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);

	ret = gk20a_gr_wait_mem_scrubbing(gr);
	if (ret)
		return ret;

	ret = gf100_gr_wait_idle(gr);
	if (ret)
		return ret;

	/* MMU debug buffer */
	if (gr->func->init_gpc_mmu)
		gr->func->init_gpc_mmu(gr);

	/* Set the PE as stream master */
	nvkm_mask(device, 0x503018, 0x1, 0x1);

	/* Zcull init */
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	/* Round-robin the TPCs over the GPCs, packing one 4-bit TPC index
	 * per slot (8 slots per 32-bit word) into data[]. */
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			  gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);

	gr->func->init_rop_active_fbps(gr);

	/* Enable FIFO access */
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* Enable interrupts */
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);

	/* Enable FECS error interrupts */
	nvkm_wr32(device, 0x409c24, 0x000f0000);

	/* Enable hardware warning exceptions */
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);

	if (gr->func->set_hww_esr_report_mask)
		gr->func->set_hww_esr_report_mask(gr);

	/* Enable TPC exceptions per GPC */
	nvkm_wr32(device, 0x419d0c, 0x2);
	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);

	/* Reset and enable all exceptions */
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}

/* gk20a GR engine description: hooks, context template, supported classes. */
static const struct gf100_gr_func
gk20a_gr = {
	.init = gk20a_gr_init,
	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
	.rops = gf100_gr_rops,
	.ppc_nr = 1,
	.grctx = &gk20a_grctx,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
		{ -1, -1, KEPLER_C, &gf100_fermi },
		{ -1, -1, KEPLER_COMPUTE_A },
		{}
	}
};

/*
 * gk20a_gr_new - allocate and construct the gk20a GR engine instance,
 * loading its falcon microcode and sw-state firmware images.
 *
 * *pgr is set before construction so the nvkm core can tear the object
 * down on any of the error returns below — NOTE(review): presumably the
 * destructor releases the partially-loaded firmware; confirm against
 * gf100_gr_dtor.
 */
int
gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	struct gf100_gr *gr;
	int ret;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;

	ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
	if (ret)
		return ret;

	/* FECS/GPCCS falcon microcode — all four images are required. */
	if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
	    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
	    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
	    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
		return -ENODEV;

	/* Software init state, converted from firmware images. */
	ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
	if (ret)
		return ret;

	ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
	if (ret)
		return ret;

	ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
	if (ret)
		return ret;

	ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
	if (ret)
		return ret;

	return 0;
}