1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "gf100.h" 25 #include "ctxgf100.h" 26 #include "fuc/os.h" 27 28 #include <core/client.h> 29 #include <core/handle.h> 30 #include <core/option.h> 31 #include <engine/fifo.h> 32 #include <subdev/fb.h> 33 #include <subdev/mc.h> 34 #include <subdev/timer.h> 35 36 #include <nvif/class.h> 37 #include <nvif/unpack.h> 38 39 /******************************************************************************* 40 * Zero Bandwidth Clear 41 ******************************************************************************/ 42 43 static void 44 gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc) 45 { 46 struct nvkm_device *device = gr->base.engine.subdev.device; 47 if (gr->zbc_color[zbc].format) { 48 nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]); 49 nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]); 50 nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]); 51 nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]); 52 } 53 nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format); 54 nvkm_wr32(device, 0x405820, zbc); 55 nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ 56 } 57 58 static int 59 gf100_gr_zbc_color_get(struct gf100_gr *gr, int format, 60 const u32 ds[4], const u32 l2[4]) 61 { 62 struct nvkm_ltc *ltc = nvkm_ltc(gr); 63 int zbc = -ENOSPC, i; 64 65 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 66 if (gr->zbc_color[i].format) { 67 if (gr->zbc_color[i].format != format) 68 continue; 69 if (memcmp(gr->zbc_color[i].ds, ds, sizeof( 70 gr->zbc_color[i].ds))) 71 continue; 72 if (memcmp(gr->zbc_color[i].l2, l2, sizeof( 73 gr->zbc_color[i].l2))) { 74 WARN_ON(1); 75 return -EINVAL; 76 } 77 return i; 78 } else { 79 zbc = (zbc < 0) ? i : zbc; 80 } 81 } 82 83 if (zbc < 0) 84 return zbc; 85 86 memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds)); 87 memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2)); 88 gr->zbc_color[zbc].format = format; 89 ltc->zbc_color_get(ltc, zbc, l2); 90 gf100_gr_zbc_clear_color(gr, zbc); 91 return zbc; 92 } 93 94 static void 95 gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc) 96 { 97 struct nvkm_device *device = gr->base.engine.subdev.device; 98 if (gr->zbc_depth[zbc].format) 99 nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds); 100 nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format); 101 nvkm_wr32(device, 0x405820, zbc); 102 nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ 103 } 104 105 static int 106 gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, 107 const u32 ds, const u32 l2) 108 { 109 struct nvkm_ltc *ltc = nvkm_ltc(gr); 110 int zbc = -ENOSPC, i; 111 112 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 113 if (gr->zbc_depth[i].format) { 114 if (gr->zbc_depth[i].format != format) 115 continue; 116 if (gr->zbc_depth[i].ds != ds) 117 continue; 118 if (gr->zbc_depth[i].l2 != l2) { 119 WARN_ON(1); 120 return -EINVAL; 121 } 122 return i; 123 } else { 124 zbc = (zbc < 0) ? i : zbc; 125 } 126 } 127 128 if (zbc < 0) 129 return zbc; 130 131 gr->zbc_depth[zbc].format = format; 132 gr->zbc_depth[zbc].ds = ds; 133 gr->zbc_depth[zbc].l2 = l2; 134 ltc->zbc_depth_get(ltc, zbc, l2); 135 gf100_gr_zbc_clear_depth(gr, zbc); 136 return zbc; 137 } 138 139 /******************************************************************************* 140 * Graphics object classes 141 ******************************************************************************/ 142 143 static int 144 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) 145 { 146 struct gf100_gr *gr = (void *)object->engine; 147 union { 148 struct fermi_a_zbc_color_v0 v0; 149 } *args = data; 150 int ret; 151 152 if (nvif_unpack(args->v0, 0, 0, false)) { 153 switch (args->v0.format) { 154 case FERMI_A_ZBC_COLOR_V0_FMT_ZERO: 155 case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE: 156 case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32: 157 case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16: 158 case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16: 159 case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16: 160 case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16: 161 case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16: 162 case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8: 163 case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8: 164 case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10: 165 case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10: 166 case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8: 167 case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8: 168 case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8: 169 case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8: 170 case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8: 171 case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10: 172 case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11: 173 ret = gf100_gr_zbc_color_get(gr, args->v0.format, 174 args->v0.ds, 175 args->v0.l2); 176 if (ret >= 0) { 177 args->v0.index = ret; 178 return 0; 179 } 180 break; 181 default: 182 return -EINVAL; 183 } 184 } 185 186 return ret; 187 } 188 189 static int 190 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) 191 { 192 struct gf100_gr *gr = (void *)object->engine; 193 union { 194 struct fermi_a_zbc_depth_v0 v0; 195 } *args = data; 196 int ret; 197 198 if (nvif_unpack(args->v0, 0, 0, false)) { 199 switch (args->v0.format) { 200 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: 201 ret = gf100_gr_zbc_depth_get(gr, args->v0.format, 202 args->v0.ds, 203 args->v0.l2); 204 return (ret >= 0) ? 0 : -ENOSPC; 205 default: 206 return -EINVAL; 207 } 208 } 209 210 return ret; 211 } 212 213 static int 214 gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 215 { 216 switch (mthd) { 217 case FERMI_A_ZBC_COLOR: 218 return gf100_fermi_mthd_zbc_color(object, data, size); 219 case FERMI_A_ZBC_DEPTH: 220 return gf100_fermi_mthd_zbc_depth(object, data, size); 221 default: 222 break; 223 } 224 return -EINVAL; 225 } 226 227 struct nvkm_ofuncs 228 gf100_fermi_ofuncs = { 229 .ctor = _nvkm_object_ctor, 230 .dtor = nvkm_object_destroy, 231 .init = nvkm_object_init, 232 .fini = nvkm_object_fini, 233 .mthd = gf100_fermi_mthd, 234 }; 235 236 static int 237 gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd, 238 void *pdata, u32 size) 239 { 240 struct gf100_gr *gr = (void *)object->engine; 241 struct nvkm_device *device = gr->base.engine.subdev.device; 242 if (size >= sizeof(u32)) { 243 u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000; 244 nvkm_wr32(device, 0x419e44, data); 245 nvkm_wr32(device, 0x419e4c, data); 246 return 0; 247 } 248 return -EINVAL; 249 } 250 251 struct nvkm_omthds 252 gf100_gr_9097_omthds[] = { 253 { 0x1528, 0x1528, gf100_gr_set_shader_exceptions }, 254 {} 255 }; 256 257 struct nvkm_omthds 258 gf100_gr_90c0_omthds[] = { 259 { 0x1528, 0x1528, gf100_gr_set_shader_exceptions }, 260 {} 261 }; 262 263 struct nvkm_oclass 264 gf100_gr_sclass[] = { 265 { FERMI_TWOD_A, &nvkm_object_ofuncs }, 266 { FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs }, 267 { FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds }, 268 { FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds }, 269 {} 270 }; 271 272 /******************************************************************************* 273 * PGRAPH context 274 ******************************************************************************/ 275 276 int 277 gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 278 struct nvkm_oclass *oclass, void *args, u32 size, 279 struct nvkm_object **pobject) 280 { 281 struct nvkm_vm *vm = nvkm_client(parent)->vm; 282 struct gf100_gr *gr = (void *)engine; 283 struct gf100_gr_data *data = gr->mmio_data; 284 struct gf100_gr_mmio *mmio = gr->mmio_list; 285 struct gf100_gr_chan *chan; 286 int ret, i; 287 288 /* allocate memory for context, and fill with default values */ 289 ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 290 gr->size, 0x100, 291 NVOBJ_FLAG_ZERO_ALLOC, &chan); 292 *pobject = nv_object(chan); 293 if (ret) 294 return ret; 295 296 /* allocate memory for a "mmio list" buffer that's used by the HUB 297 * fuc to modify some per-context register settings on first load 298 * of the context. 299 */ 300 ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0, 301 &chan->mmio); 302 if (ret) 303 return ret; 304 305 ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm, 306 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 307 &chan->mmio_vma); 308 if (ret) 309 return ret; 310 311 /* allocate buffers referenced by mmio list */ 312 for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) { 313 ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size, 314 data->align, 0, &chan->data[i].mem); 315 if (ret) 316 return ret; 317 318 ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access, 319 &chan->data[i].vma); 320 if (ret) 321 return ret; 322 323 data++; 324 } 325 326 /* finally, fill in the mmio list and point the context at it */ 327 for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) { 328 u32 addr = mmio->addr; 329 u32 data = mmio->data; 330 331 if (mmio->buffer >= 0) { 332 u64 info = chan->data[mmio->buffer].vma.offset; 333 data |= info >> mmio->shift; 334 } 335 336 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr); 337 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data); 338 mmio++; 339 } 340 341 for (i = 0; i < gr->size; i += 4) 342 nv_wo32(chan, i, gr->data[i / 4]); 343 344 if (!gr->firmware) { 345 nv_wo32(chan, 0x00, chan->mmio_nr / 2); 346 nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8); 347 } else { 348 nv_wo32(chan, 0xf4, 0); 349 nv_wo32(chan, 0xf8, 0); 350 nv_wo32(chan, 0x10, chan->mmio_nr / 2); 351 nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset)); 352 nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset)); 353 nv_wo32(chan, 0x1c, 1); 354 nv_wo32(chan, 0x20, 0); 355 nv_wo32(chan, 0x28, 0); 356 nv_wo32(chan, 0x2c, 0); 357 } 358 359 return 0; 360 } 361 362 void 363 gf100_gr_context_dtor(struct nvkm_object *object) 364 { 365 struct gf100_gr_chan *chan = (void *)object; 366 int i; 367 368 for (i = 0; i < ARRAY_SIZE(chan->data); i++) { 369 nvkm_gpuobj_unmap(&chan->data[i].vma); 370 nvkm_gpuobj_ref(NULL, &chan->data[i].mem); 371 } 372 373 nvkm_gpuobj_unmap(&chan->mmio_vma); 374 nvkm_gpuobj_ref(NULL, &chan->mmio); 375 376 nvkm_gr_context_destroy(&chan->base); 377 } 378 379 /******************************************************************************* 380 * PGRAPH register lists 381 ******************************************************************************/ 382 383 const struct gf100_gr_init 384 gf100_gr_init_main_0[] = { 385 { 0x400080, 1, 0x04, 0x003083c2 }, 386 { 0x400088, 1, 0x04, 0x00006fe7 }, 387 { 0x40008c, 1, 0x04, 0x00000000 }, 388 { 0x400090, 1, 0x04, 0x00000030 }, 389 { 0x40013c, 1, 0x04, 0x013901f7 }, 390 { 0x400140, 1, 0x04, 0x00000100 }, 391 { 0x400144, 1, 0x04, 0x00000000 }, 392 { 0x400148, 1, 0x04, 0x00000110 }, 393 { 0x400138, 1, 0x04, 0x00000000 }, 394 { 0x400130, 2, 0x04, 0x00000000 }, 395 { 0x400124, 1, 0x04, 0x00000002 }, 396 {} 397 }; 398 399 const struct gf100_gr_init 400 gf100_gr_init_fe_0[] = { 401 { 0x40415c, 1, 0x04, 0x00000000 }, 402 { 0x404170, 1, 0x04, 0x00000000 }, 403 {} 404 }; 405 406 const struct gf100_gr_init 407 gf100_gr_init_pri_0[] = { 408 { 0x404488, 2, 0x04, 0x00000000 }, 409 {} 410 }; 411 412 const struct gf100_gr_init 413 gf100_gr_init_rstr2d_0[] = { 414 { 0x407808, 1, 0x04, 0x00000000 }, 415 {} 416 }; 417 418 const struct gf100_gr_init 419 gf100_gr_init_pd_0[] = { 420 { 0x406024, 1, 0x04, 0x00000000 }, 421 {} 422 }; 423 424 const struct gf100_gr_init 425 gf100_gr_init_ds_0[] = { 426 { 0x405844, 1, 0x04, 0x00ffffff }, 427 { 0x405850, 1, 0x04, 0x00000000 }, 428 { 0x405908, 1, 0x04, 0x00000000 }, 429 {} 430 }; 431 432 const struct gf100_gr_init 433 gf100_gr_init_scc_0[] = { 434 { 0x40803c, 1, 0x04, 0x00000000 }, 435 {} 436 }; 437 438 const struct gf100_gr_init 439 gf100_gr_init_prop_0[] = { 440 { 0x4184a0, 1, 0x04, 0x00000000 }, 441 {} 442 }; 443 444 const struct gf100_gr_init 445 gf100_gr_init_gpc_unk_0[] = { 446 { 0x418604, 1, 0x04, 0x00000000 }, 447 { 0x418680, 1, 0x04, 0x00000000 }, 448 { 0x418714, 1, 0x04, 0x80000000 }, 449 { 0x418384, 1, 0x04, 0x00000000 }, 450 {} 451 }; 452 453 const struct gf100_gr_init 454 gf100_gr_init_setup_0[] = { 455 { 0x418814, 3, 0x04, 0x00000000 }, 456 {} 457 }; 458 459 const struct gf100_gr_init 460 gf100_gr_init_crstr_0[] = { 461 { 0x418b04, 1, 0x04, 0x00000000 }, 462 {} 463 }; 464 465 const struct gf100_gr_init 466 gf100_gr_init_setup_1[] = { 467 { 0x4188c8, 1, 0x04, 0x80000000 }, 468 { 0x4188cc, 1, 0x04, 0x00000000 }, 469 { 0x4188d0, 1, 0x04, 0x00010000 }, 470 { 0x4188d4, 1, 0x04, 0x00000001 }, 471 {} 472 }; 473 474 const struct gf100_gr_init 475 gf100_gr_init_zcull_0[] = { 476 { 0x418910, 1, 0x04, 0x00010001 }, 477 { 0x418914, 1, 0x04, 0x00000301 }, 478 { 0x418918, 1, 0x04, 0x00800000 }, 479 { 0x418980, 1, 0x04, 0x77777770 }, 480 { 0x418984, 3, 0x04, 0x77777777 }, 481 {} 482 }; 483 484 const struct gf100_gr_init 485 gf100_gr_init_gpm_0[] = { 486 { 0x418c04, 1, 0x04, 0x00000000 }, 487 { 0x418c88, 1, 0x04, 0x00000000 }, 488 {} 489 }; 490 491 const struct gf100_gr_init 492 gf100_gr_init_gpc_unk_1[] = { 493 { 0x418d00, 1, 0x04, 0x00000000 }, 494 { 0x418f08, 1, 0x04, 0x00000000 }, 495 { 0x418e00, 1, 0x04, 0x00000050 }, 496 { 0x418e08, 1, 0x04, 0x00000000 }, 497 {} 498 }; 499 500 const struct gf100_gr_init 501 gf100_gr_init_gcc_0[] = { 502 { 0x41900c, 1, 0x04, 0x00000000 }, 503 { 0x419018, 1, 0x04, 0x00000000 }, 504 {} 505 }; 506 507 const struct gf100_gr_init 508 gf100_gr_init_tpccs_0[] = { 509 { 0x419d08, 2, 0x04, 0x00000000 }, 510 { 0x419d10, 1, 0x04, 0x00000014 }, 511 {} 512 }; 513 514 const struct gf100_gr_init 515 gf100_gr_init_tex_0[] = { 516 { 0x419ab0, 1, 0x04, 0x00000000 }, 517 { 0x419ab8, 1, 0x04, 0x000000e7 }, 518 { 0x419abc, 2, 0x04, 0x00000000 }, 519 {} 520 }; 521 522 const struct gf100_gr_init 523 gf100_gr_init_pe_0[] = { 524 { 0x41980c, 3, 0x04, 0x00000000 }, 525 { 0x419844, 1, 0x04, 0x00000000 }, 526 { 0x41984c, 1, 0x04, 0x00005bc5 }, 527 { 0x419850, 4, 0x04, 0x00000000 }, 528 {} 529 }; 530 531 const struct gf100_gr_init 532 gf100_gr_init_l1c_0[] = { 533 { 0x419c98, 1, 0x04, 0x00000000 }, 534 { 0x419ca8, 1, 0x04, 0x80000000 }, 535 { 0x419cb4, 1, 0x04, 0x00000000 }, 536 { 0x419cb8, 1, 0x04, 0x00008bf4 }, 537 { 0x419cbc, 1, 0x04, 0x28137606 }, 538 { 0x419cc0, 2, 0x04, 0x00000000 }, 539 {} 540 }; 541 542 const struct gf100_gr_init 543 gf100_gr_init_wwdx_0[] = { 544 { 0x419bd4, 1, 0x04, 0x00800000 }, 545 { 0x419bdc, 1, 0x04, 0x00000000 }, 546 {} 547 }; 548 549 const struct gf100_gr_init 550 gf100_gr_init_tpccs_1[] = { 551 { 0x419d2c, 1, 0x04, 0x00000000 }, 552 {} 553 }; 554 555 const struct gf100_gr_init 556 gf100_gr_init_mpc_0[] = { 557 { 0x419c0c, 1, 0x04, 0x00000000 }, 558 {} 559 }; 560 561 static const struct gf100_gr_init 562 gf100_gr_init_sm_0[] = { 563 { 0x419e00, 1, 0x04, 0x00000000 }, 564 { 0x419ea0, 1, 0x04, 0x00000000 }, 565 { 0x419ea4, 1, 0x04, 0x00000100 }, 566 { 0x419ea8, 1, 0x04, 0x00001100 }, 567 { 0x419eac, 1, 0x04, 0x11100702 }, 568 { 0x419eb0, 1, 0x04, 0x00000003 }, 569 { 0x419eb4, 4, 0x04, 0x00000000 }, 570 { 0x419ec8, 1, 0x04, 0x06060618 }, 571 { 0x419ed0, 1, 0x04, 0x0eff0e38 }, 572 { 0x419ed4, 1, 0x04, 0x011104f1 }, 573 { 0x419edc, 1, 0x04, 0x00000000 }, 574 { 0x419f00, 1, 0x04, 0x00000000 }, 575 { 0x419f2c, 1, 0x04, 0x00000000 }, 576 {} 577 }; 578 579 const struct gf100_gr_init 580 gf100_gr_init_be_0[] = { 581 { 0x40880c, 1, 0x04, 0x00000000 }, 582 { 0x408910, 9, 0x04, 0x00000000 }, 583 { 0x408950, 1, 0x04, 0x00000000 }, 584 { 0x408954, 1, 0x04, 0x0000ffff }, 585 { 0x408984, 1, 0x04, 0x00000000 }, 586 { 0x408988, 1, 0x04, 0x08040201 }, 587 { 0x40898c, 1, 0x04, 0x80402010 }, 588 {} 589 }; 590 591 const struct gf100_gr_init 592 gf100_gr_init_fe_1[] = { 593 { 0x4040f0, 1, 0x04, 0x00000000 }, 594 {} 595 }; 596 597 const struct gf100_gr_init 598 gf100_gr_init_pe_1[] = { 599 { 0x419880, 1, 0x04, 0x00000002 }, 600 {} 601 }; 602 603 static const struct gf100_gr_pack 604 gf100_gr_pack_mmio[] = { 605 { gf100_gr_init_main_0 }, 606 { gf100_gr_init_fe_0 }, 607 { gf100_gr_init_pri_0 }, 608 { gf100_gr_init_rstr2d_0 }, 609 { gf100_gr_init_pd_0 }, 610 { gf100_gr_init_ds_0 }, 611 { gf100_gr_init_scc_0 }, 612 { gf100_gr_init_prop_0 }, 613 { gf100_gr_init_gpc_unk_0 }, 614 { gf100_gr_init_setup_0 }, 615 { gf100_gr_init_crstr_0 }, 616 { gf100_gr_init_setup_1 }, 617 { gf100_gr_init_zcull_0 }, 618 { gf100_gr_init_gpm_0 }, 619 { gf100_gr_init_gpc_unk_1 }, 620 { gf100_gr_init_gcc_0 }, 621 { gf100_gr_init_tpccs_0 }, 622 { gf100_gr_init_tex_0 }, 623 { gf100_gr_init_pe_0 }, 624 { gf100_gr_init_l1c_0 }, 625 { gf100_gr_init_wwdx_0 }, 626 { gf100_gr_init_tpccs_1 }, 627 { gf100_gr_init_mpc_0 }, 628 { gf100_gr_init_sm_0 }, 629 { gf100_gr_init_be_0 }, 630 { gf100_gr_init_fe_1 }, 631 { gf100_gr_init_pe_1 }, 632 {} 633 }; 634 635 /******************************************************************************* 636 * PGRAPH engine/subdev functions 637 ******************************************************************************/ 638 639 void 640 gf100_gr_zbc_init(struct gf100_gr *gr) 641 { 642 const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 643 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 644 const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 645 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; 646 const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 647 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 648 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 649 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; 650 struct nvkm_ltc *ltc = nvkm_ltc(gr); 651 int index; 652 653 if (!gr->zbc_color[0].format) { 654 gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); 655 gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); 656 gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); 657 gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); 658 gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); 659 gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); 660 } 661 662 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 663 gf100_gr_zbc_clear_color(gr, index); 664 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 665 gf100_gr_zbc_clear_depth(gr, index); 666 } 667 668 /** 669 * Wait until GR goes idle. GR is considered idle if it is disabled by the 670 * MC (0x200) register, or GR is not busy and a context switch is not in 671 * progress. 672 */ 673 int 674 gf100_gr_wait_idle(struct gf100_gr *gr) 675 { 676 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 677 struct nvkm_device *device = subdev->device; 678 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000); 679 bool gr_enabled, ctxsw_active, gr_busy; 680 681 do { 682 /* 683 * required to make sure FIFO_ENGINE_STATUS (0x2640) is 684 * up-to-date 685 */ 686 nvkm_rd32(device, 0x400700); 687 688 gr_enabled = nvkm_rd32(device, 0x200) & 0x1000; 689 ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000; 690 gr_busy = nvkm_rd32(device, 0x40060c) & 0x1; 691 692 if (!gr_enabled || (!gr_busy && !ctxsw_active)) 693 return 0; 694 } while (time_before(jiffies, end_jiffies)); 695 696 nvkm_error(subdev, 697 "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n", 698 gr_enabled, ctxsw_active, gr_busy); 699 return -EAGAIN; 700 } 701 702 void 703 gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p) 704 { 705 struct nvkm_device *device = gr->base.engine.subdev.device; 706 const struct gf100_gr_pack *pack; 707 const struct gf100_gr_init *init; 708 709 pack_for_each_init(init, pack, p) { 710 u32 next = init->addr + init->count * init->pitch; 711 u32 addr = init->addr; 712 while (addr < next) { 713 nvkm_wr32(device, addr, init->data); 714 addr += init->pitch; 715 } 716 } 717 } 718 719 void 720 gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p) 721 { 722 struct nvkm_device *device = gr->base.engine.subdev.device; 723 const struct gf100_gr_pack *pack; 724 const struct gf100_gr_init *init; 725 u32 data = 0; 726 727 nvkm_wr32(device, 0x400208, 0x80000000); 728 729 pack_for_each_init(init, pack, p) { 730 u32 next = init->addr + init->count * init->pitch; 731 u32 addr = init->addr; 732 733 if ((pack == p && init == p->init) || data != init->data) { 734 nvkm_wr32(device, 0x400204, init->data); 735 data = init->data; 736 } 737 738 while (addr < next) { 739 nvkm_wr32(device, 0x400200, addr); 740 /** 741 * Wait for GR to go idle after submitting a 742 * GO_IDLE bundle 743 */ 744 if ((addr & 0xffff) == 0xe100) 745 gf100_gr_wait_idle(gr); 746 nvkm_msec(device, 2000, 747 if (!(nvkm_rd32(device, 0x400700) & 0x00000004)) 748 break; 749 ); 750 addr += init->pitch; 751 } 752 } 753 754 nvkm_wr32(device, 0x400208, 0x00000000); 755 } 756 757 void 758 gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p) 759 { 760 struct nvkm_device *device = gr->base.engine.subdev.device; 761 const struct gf100_gr_pack *pack; 762 const struct gf100_gr_init *init; 763 u32 data = 0; 764 765 pack_for_each_init(init, pack, p) { 766 u32 ctrl = 0x80000000 | pack->type; 767 u32 next = init->addr + init->count * init->pitch; 768 u32 addr = init->addr; 769 770 if ((pack == p && init == p->init) || data != init->data) { 771 nvkm_wr32(device, 0x40448c, init->data); 772 data = init->data; 773 } 774 775 while (addr < next) { 776 nvkm_wr32(device, 0x404488, ctrl | (addr << 14)); 777 addr += init->pitch; 778 } 779 } 780 } 781 782 u64 783 gf100_gr_units(struct nvkm_gr *obj) 784 { 785 struct gf100_gr *gr = container_of(obj, typeof(*gr), base); 786 u64 cfg; 787 788 cfg = (u32)gr->gpc_nr; 789 cfg |= (u32)gr->tpc_total << 8; 790 cfg |= (u64)gr->rop_nr << 32; 791 792 return cfg; 793 } 794 795 static const struct nvkm_bitfield gk104_sked_error[] = { 796 { 0x00000080, "CONSTANT_BUFFER_SIZE" }, 797 { 0x00000200, "LOCAL_MEMORY_SIZE_POS" }, 798 { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" }, 799 { 0x00000800, "WARP_CSTACK_SIZE" }, 800 { 0x00001000, "TOTAL_TEMP_SIZE" }, 801 { 0x00002000, "REGISTER_COUNT" }, 802 { 0x00040000, "TOTAL_THREADS" }, 803 { 0x00100000, "PROGRAM_OFFSET" }, 804 { 0x00200000, "SHARED_MEMORY_SIZE" }, 805 { 0x02000000, "SHARED_CONFIG_TOO_SMALL" }, 806 { 0x04000000, "TOTAL_REGISTER_COUNT" }, 807 {} 808 }; 809 810 static const struct nvkm_bitfield gf100_gpc_rop_error[] = { 811 { 0x00000002, "RT_PITCH_OVERRUN" }, 812 { 0x00000010, "RT_WIDTH_OVERRUN" }, 813 { 0x00000020, "RT_HEIGHT_OVERRUN" }, 814 { 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" }, 815 { 0x00000100, "RT_STORAGE_TYPE_MISMATCH" }, 816 { 0x00000400, "RT_LINEAR_MISMATCH" }, 817 {} 818 }; 819 820 static void 821 gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) 822 { 823 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 824 struct nvkm_device *device = subdev->device; 825 char error[128]; 826 u32 trap[4]; 827 828 trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff; 829 trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434)); 830 trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438)); 831 trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c)); 832 833 nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]); 834 835 nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, " 836 "format = %x, storage type = %x\n", 837 gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16, 838 (trap[2] >> 8) & 0x3f, trap[3] & 0xff); 839 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 840 } 841 842 static const struct nvkm_enum gf100_mp_warp_error[] = { 843 { 0x00, "NO_ERROR" }, 844 { 0x01, "STACK_MISMATCH" }, 845 { 0x05, "MISALIGNED_PC" }, 846 { 0x08, "MISALIGNED_GPR" }, 847 { 0x09, "INVALID_OPCODE" }, 848 { 0x0d, "GPR_OUT_OF_BOUNDS" }, 849 { 0x0e, "MEM_OUT_OF_BOUNDS" }, 850 { 0x0f, "UNALIGNED_MEM_ACCESS" }, 851 { 0x11, "INVALID_PARAM" }, 852 {} 853 }; 854 855 static const struct nvkm_bitfield gf100_mp_global_error[] = { 856 { 0x00000004, "MULTIPLE_WARP_ERRORS" }, 857 { 0x00000008, "OUT_OF_STACK_SPACE" }, 858 {} 859 }; 860 861 static void 862 gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) 863 { 864 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 865 struct nvkm_device *device = subdev->device; 866 u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648)); 867 u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650)); 868 const struct nvkm_enum *warp; 869 char glob[128]; 870 871 nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr); 872 warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff); 873 874 nvkm_error(subdev, "GPC%i/TPC%i/MP trap: " 875 "global %08x [%s] warp %04x [%s]\n", 876 gpc, tpc, gerr, glob, werr, warp ? warp->name : ""); 877 878 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000); 879 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr); 880 } 881 882 static void 883 gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc) 884 { 885 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 886 struct nvkm_device *device = subdev->device; 887 u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508)); 888 889 if (stat & 0x00000001) { 890 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224)); 891 nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap); 892 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000); 893 stat &= ~0x00000001; 894 } 895 896 if (stat & 0x00000002) { 897 gf100_gr_trap_mp(gr, gpc, tpc); 898 stat &= ~0x00000002; 899 } 900 901 if (stat & 0x00000004) { 902 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084)); 903 nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap); 904 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000); 905 stat &= ~0x00000004; 906 } 907 908 if (stat & 0x00000008) { 909 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c)); 910 nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap); 911 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000); 912 stat &= ~0x00000008; 913 } 914 915 if (stat) { 916 nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat); 917 } 918 } 919 920 static void 921 gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc) 922 { 923 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 924 struct nvkm_device *device = subdev->device; 925 u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90)); 926 int tpc; 927 928 if (stat & 0x00000001) { 929 gf100_gr_trap_gpc_rop(gr, gpc); 930 stat &= ~0x00000001; 931 } 932 933 if (stat & 0x00000002) { 934 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900)); 935 nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap); 936 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 937 stat &= ~0x00000002; 938 } 939 940 if (stat & 0x00000004) { 941 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028)); 942 nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap); 943 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); 944 stat &= ~0x00000004; 945 } 946 947 if (stat & 0x00000008) { 948 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824)); 949 nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap); 950 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); 951 stat &= ~0x00000009; 952 } 953 954 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 955 u32 mask = 0x00010000 << tpc; 956 if (stat & mask) { 957 gf100_gr_trap_tpc(gr, gpc, tpc); 958 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask); 959 stat &= ~mask; 960 } 961 } 962 963 if (stat) { 964 nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat); 965 } 966 } 967 968 static void 969 gf100_gr_trap_intr(struct gf100_gr *gr) 970 { 971 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 972 struct nvkm_device *device = subdev->device; 973 u32 trap = nvkm_rd32(device, 0x400108); 974 int rop, gpc; 975 976 if (trap & 0x00000001) { 977 u32 stat = nvkm_rd32(device, 0x404000); 978 nvkm_error(subdev, "DISPATCH %08x\n", stat); 979 nvkm_wr32(device, 0x404000, 0xc0000000); 980 nvkm_wr32(device, 0x400108, 0x00000001); 981 trap &= ~0x00000001; 982 } 983 984 if (trap & 0x00000002) { 985 u32 stat = nvkm_rd32(device, 0x404600); 986 nvkm_error(subdev, "M2MF %08x\n", stat); 987 nvkm_wr32(device, 0x404600, 0xc0000000); 988 nvkm_wr32(device, 0x400108, 0x00000002); 989 trap &= ~0x00000002; 990 } 991 992 if (trap & 0x00000008) { 993 u32 stat = nvkm_rd32(device, 0x408030); 994 nvkm_error(subdev, "CCACHE %08x\n", stat); 995 nvkm_wr32(device, 0x408030, 0xc0000000); 996 nvkm_wr32(device, 0x400108, 0x00000008); 997 trap &= ~0x00000008; 998 } 999 1000 if (trap & 0x00000010) { 1001 u32 stat = nvkm_rd32(device, 0x405840); 1002 nvkm_error(subdev, "SHADER %08x\n", stat); 1003 nvkm_wr32(device, 0x405840, 0xc0000000); 1004 nvkm_wr32(device, 0x400108, 0x00000010); 1005 trap &= ~0x00000010; 1006 } 1007 1008 if (trap & 0x00000040) { 1009 u32 stat = nvkm_rd32(device, 0x40601c); 1010 nvkm_error(subdev, "UNK6 %08x\n", stat); 1011 nvkm_wr32(device, 0x40601c, 0xc0000000); 1012 nvkm_wr32(device, 0x400108, 0x00000040); 1013 trap &= ~0x00000040; 1014 } 1015 1016 if (trap & 0x00000080) { 1017 u32 stat = nvkm_rd32(device, 0x404490); 1018 nvkm_error(subdev, "MACRO %08x\n", stat); 1019 nvkm_wr32(device, 0x404490, 0xc0000000); 1020 nvkm_wr32(device, 0x400108, 0x00000080); 1021 trap &= ~0x00000080; 1022 } 1023 1024 if (trap & 0x00000100) { 1025 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff; 1026 char sked[128]; 1027 1028 nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat); 1029 nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked); 1030 1031 if (stat) 1032 nvkm_wr32(device, 0x407020, 0x40000000); 1033 nvkm_wr32(device, 0x400108, 0x00000100); 1034 trap &= ~0x00000100; 1035 } 1036 1037 if (trap & 0x01000000) { 1038 u32 stat = nvkm_rd32(device, 0x400118); 1039 for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) { 1040 u32 mask = 0x00000001 << gpc; 1041 if (stat & mask) { 1042 gf100_gr_trap_gpc(gr, gpc); 1043 nvkm_wr32(device, 0x400118, mask); 1044 stat &= ~mask; 1045 } 1046 } 1047 nvkm_wr32(device, 0x400108, 0x01000000); 1048 trap &= ~0x01000000; 1049 } 1050 1051 if (trap & 0x02000000) { 1052 for (rop = 0; rop < gr->rop_nr; rop++) { 1053 u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070)); 1054 u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144)); 1055 nvkm_error(subdev, "ROP%d %08x %08x\n", 1056 rop, statz, statc); 1057 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); 1058 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); 1059 } 1060 nvkm_wr32(device, 0x400108, 0x02000000); 1061 trap &= ~0x02000000; 1062 } 1063 1064 if (trap) { 1065 nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap); 1066 nvkm_wr32(device, 0x400108, trap); 1067 } 1068 } 1069 1070 static void 1071 gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base) 1072 { 1073 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1074 struct nvkm_device *device = subdev->device; 1075 nvkm_error(subdev, "%06x - done %08x\n", base, 1076 nvkm_rd32(device, base + 0x400)); 1077 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base, 1078 nvkm_rd32(device, base + 0x800), 1079 nvkm_rd32(device, base + 0x804), 1080 nvkm_rd32(device, base + 0x808), 1081 nvkm_rd32(device, base + 0x80c)); 1082 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base, 1083 nvkm_rd32(device, base + 0x810), 1084 nvkm_rd32(device, base + 0x814), 1085 nvkm_rd32(device, base + 0x818), 1086 nvkm_rd32(device, base + 0x81c)); 1087 } 1088 1089 void 1090 gf100_gr_ctxctl_debug(struct gf100_gr *gr) 1091 { 1092 struct nvkm_device *device = gr->base.engine.subdev.device; 1093 u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff; 1094 u32 gpc; 1095 1096 gf100_gr_ctxctl_debug_unit(gr, 0x409000); 1097 for (gpc = 0; gpc < gpcnr; gpc++) 1098 gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000)); 1099 } 1100 1101 static void 1102 gf100_gr_ctxctl_isr(struct gf100_gr *gr) 1103 { 1104 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1105 struct nvkm_device *device = subdev->device; 1106 u32 stat = nvkm_rd32(device, 0x409c18); 1107 1108 if (stat & 0x00000001) { 1109 u32 code = nvkm_rd32(device, 0x409814); 1110 if (code == E_BAD_FWMTHD) { 1111 u32 class = nvkm_rd32(device, 0x409808); 1112 u32 addr = nvkm_rd32(device, 0x40980c); 1113 u32 subc = (addr & 0x00070000) >> 16; 1114 u32 mthd = (addr & 0x00003ffc); 1115 u32 data = nvkm_rd32(device, 0x409810); 1116 1117 nvkm_error(subdev, "FECS MTHD subc %d class %04x " 1118 "mthd %04x data %08x\n", 1119 subc, class, mthd, data); 1120 1121 nvkm_wr32(device, 0x409c20, 0x00000001); 1122 stat &= ~0x00000001; 1123 } else { 1124 nvkm_error(subdev, "FECS ucode error %d\n", code); 1125 } 1126 } 1127 1128 if (stat & 0x00080000) { 1129 nvkm_error(subdev, "FECS watchdog timeout\n"); 1130 gf100_gr_ctxctl_debug(gr); 1131 nvkm_wr32(device, 0x409c20, 0x00080000); 1132 stat &= ~0x00080000; 1133 } 1134 1135 if (stat) { 1136 nvkm_error(subdev, "FECS %08x\n", stat); 1137 gf100_gr_ctxctl_debug(gr); 1138 nvkm_wr32(device, 0x409c20, stat); 1139 } 1140 } 1141 1142 static void 1143 gf100_gr_intr(struct nvkm_subdev *subdev) 1144 { 1145 struct gf100_gr *gr = (void *)subdev; 1146 struct nvkm_device *device = gr->base.engine.subdev.device; 1147 struct nvkm_fifo *fifo = device->fifo; 1148 struct nvkm_engine *engine = nv_engine(subdev); 1149 struct nvkm_object *engctx; 1150 struct nvkm_handle *handle; 1151 u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff; 1152 u32 stat = nvkm_rd32(device, 0x400100); 1153 u32 addr = nvkm_rd32(device, 0x400704); 1154 u32 mthd = (addr & 0x00003ffc); 1155 u32 subc = (addr & 0x00070000) >> 16; 1156 u32 data = nvkm_rd32(device, 0x400708); 1157 u32 code = nvkm_rd32(device, 0x400110); 1158 u32 class; 1159 int chid; 1160 1161 if (nv_device(gr)->card_type < NV_E0 || subc < 4) 1162 class = nvkm_rd32(device, 0x404200 + (subc * 4)); 1163 else 1164 class = 0x0000; 1165 1166 engctx = nvkm_engctx_get(engine, inst); 1167 chid = fifo->chid(fifo, engctx); 1168 1169 if (stat & 0x00000001) { 1170 /* 1171 * notifier interrupt, only needed for cyclestats 1172 * can be safely ignored 1173 */ 1174 nvkm_wr32(device, 0x400100, 0x00000001); 1175 stat &= ~0x00000001; 1176 } 1177 1178 if (stat & 0x00000010) { 1179 handle = nvkm_handle_get_class(engctx, class); 1180 if (!handle || nv_call(handle->object, mthd, data)) { 1181 nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] " 1182 "subc %d class %04x mthd %04x data %08x\n", 1183 chid, inst << 12, nvkm_client_name(engctx), 1184 subc, class, mthd, data); 1185 } 1186 nvkm_handle_put(handle); 1187 nvkm_wr32(device, 0x400100, 0x00000010); 1188 stat &= ~0x00000010; 1189 } 1190 1191 if (stat & 0x00000020) { 1192 nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] " 1193 "subc %d class %04x mthd %04x data %08x\n", 1194 chid, inst << 12, nvkm_client_name(engctx), subc, 1195 class, mthd, data); 1196 nvkm_wr32(device, 0x400100, 0x00000020); 1197 stat &= ~0x00000020; 1198 } 1199 1200 if (stat & 0x00100000) { 1201 const struct nvkm_enum *en = 1202 nvkm_enum_find(nv50_data_error_names, code); 1203 nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] " 1204 "subc %d class %04x mthd %04x data %08x\n", 1205 code, en ? en->name : "", chid, inst << 12, 1206 nvkm_client_name(engctx), subc, class, mthd, data); 1207 nvkm_wr32(device, 0x400100, 0x00100000); 1208 stat &= ~0x00100000; 1209 } 1210 1211 if (stat & 0x00200000) { 1212 nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n", 1213 chid, inst << 12, 1214 nvkm_client_name(engctx)); 1215 gf100_gr_trap_intr(gr); 1216 nvkm_wr32(device, 0x400100, 0x00200000); 1217 stat &= ~0x00200000; 1218 } 1219 1220 if (stat & 0x00080000) { 1221 gf100_gr_ctxctl_isr(gr); 1222 nvkm_wr32(device, 0x400100, 0x00080000); 1223 stat &= ~0x00080000; 1224 } 1225 1226 if (stat) { 1227 nvkm_error(subdev, "intr %08x\n", stat); 1228 nvkm_wr32(device, 0x400100, stat); 1229 } 1230 1231 nvkm_wr32(device, 0x400500, 0x00010001); 1232 nvkm_engctx_put(engctx); 1233 } 1234 1235 void 1236 gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base, 1237 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) 1238 { 1239 struct nvkm_device *device = gr->base.engine.subdev.device; 1240 int i; 1241 1242 nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000); 1243 for (i = 0; i < data->size / 4; i++) 1244 nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]); 1245 1246 nvkm_wr32(device, fuc_base + 0x0180, 0x01000000); 1247 for (i = 0; i < code->size / 4; i++) { 1248 if ((i & 0x3f) == 0) 1249 nvkm_wr32(device, fuc_base + 0x0188, i >> 6); 1250 nvkm_wr32(device, fuc_base + 0x0184, code->data[i]); 1251 } 1252 1253 /* code must be padded to 0x40 words */ 1254 for (; i & 0x3f; i++) 1255 nvkm_wr32(device, fuc_base + 0x0184, 0); 1256 } 1257 1258 static void 1259 gf100_gr_init_csdata(struct gf100_gr *gr, 1260 const struct gf100_gr_pack *pack, 1261 u32 falcon, u32 starstar, u32 base) 1262 { 1263 struct nvkm_device *device = gr->base.engine.subdev.device; 1264 const struct gf100_gr_pack *iter; 1265 const struct gf100_gr_init *init; 1266 u32 addr = ~0, prev = ~0, xfer = 0; 1267 u32 star, temp; 1268 1269 nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar); 1270 star = nvkm_rd32(device, falcon + 0x01c4); 1271 temp = nvkm_rd32(device, falcon + 0x01c4); 1272 if (temp > star) 1273 star = temp; 1274 nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star); 1275 1276 pack_for_each_init(init, iter, pack) { 1277 u32 head = init->addr - base; 1278 u32 tail = head + init->count * init->pitch; 1279 while (head < tail) { 1280 if (head != prev + 4 || xfer >= 32) { 1281 if (xfer) { 1282 u32 data = ((--xfer << 26) | addr); 1283 nvkm_wr32(device, falcon + 0x01c4, data); 1284 star += 4; 1285 } 1286 addr = head; 1287 xfer = 0; 1288 } 1289 prev = head; 1290 xfer = xfer + 1; 1291 head = head + init->pitch; 1292 } 1293 } 1294 1295 nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr); 1296 nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar); 1297 nvkm_wr32(device, falcon + 0x01c4, star + 4); 1298 } 1299 1300 int 1301 gf100_gr_init_ctxctl(struct gf100_gr *gr) 1302 { 1303 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1304 struct nvkm_device *device = subdev->device; 1305 struct gf100_gr_oclass *oclass = (void *)nv_object(gr)->oclass; 1306 struct gf100_grctx_oclass *cclass = (void *)nv_engine(gr)->cclass; 1307 int i; 1308 1309 if (gr->firmware) { 1310 /* load fuc microcode */ 1311 nvkm_mc(gr)->unk260(nvkm_mc(gr), 0); 1312 gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, 1313 &gr->fuc409d); 1314 gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, 1315 &gr->fuc41ad); 1316 nvkm_mc(gr)->unk260(nvkm_mc(gr), 1); 1317 1318 /* start both of them running */ 1319 nvkm_wr32(device, 0x409840, 0xffffffff); 1320 nvkm_wr32(device, 0x41a10c, 0x00000000); 1321 nvkm_wr32(device, 0x40910c, 0x00000000); 1322 nvkm_wr32(device, 0x41a100, 0x00000002); 1323 nvkm_wr32(device, 0x409100, 0x00000002); 1324 if (nvkm_msec(device, 2000, 1325 if (nvkm_rd32(device, 0x409800) & 0x00000001) 1326 break; 1327 ) < 0) 1328 return -EBUSY; 1329 1330 nvkm_wr32(device, 0x409840, 0xffffffff); 1331 nvkm_wr32(device, 0x409500, 0x7fffffff); 1332 nvkm_wr32(device, 0x409504, 0x00000021); 1333 1334 nvkm_wr32(device, 0x409840, 0xffffffff); 1335 nvkm_wr32(device, 0x409500, 0x00000000); 1336 nvkm_wr32(device, 0x409504, 0x00000010); 1337 if (nvkm_msec(device, 2000, 1338 if ((gr->size = nvkm_rd32(device, 0x409800))) 1339 break; 1340 ) < 0) 1341 return -EBUSY; 1342 1343 nvkm_wr32(device, 0x409840, 0xffffffff); 1344 nvkm_wr32(device, 0x409500, 0x00000000); 1345 nvkm_wr32(device, 0x409504, 0x00000016); 1346 if (nvkm_msec(device, 2000, 1347 if (nvkm_rd32(device, 0x409800)) 1348 break; 1349 ) < 0) 1350 return -EBUSY; 1351 1352 nvkm_wr32(device, 0x409840, 0xffffffff); 1353 nvkm_wr32(device, 0x409500, 0x00000000); 1354 nvkm_wr32(device, 0x409504, 0x00000025); 1355 if (nvkm_msec(device, 2000, 1356 if (nvkm_rd32(device, 0x409800)) 1357 break; 1358 ) < 0) 1359 return -EBUSY; 1360 1361 if (nv_device(gr)->chipset >= 0xe0) { 1362 nvkm_wr32(device, 0x409800, 0x00000000); 1363 nvkm_wr32(device, 0x409500, 0x00000001); 1364 nvkm_wr32(device, 0x409504, 0x00000030); 1365 if (nvkm_msec(device, 2000, 1366 if (nvkm_rd32(device, 0x409800)) 1367 break; 1368 ) < 0) 1369 return -EBUSY; 1370 1371 nvkm_wr32(device, 0x409810, 0xb00095c8); 1372 nvkm_wr32(device, 0x409800, 0x00000000); 1373 nvkm_wr32(device, 0x409500, 0x00000001); 1374 nvkm_wr32(device, 0x409504, 0x00000031); 1375 if (nvkm_msec(device, 2000, 1376 if (nvkm_rd32(device, 0x409800)) 1377 break; 1378 ) < 0) 1379 return -EBUSY; 1380 1381 nvkm_wr32(device, 0x409810, 0x00080420); 1382 nvkm_wr32(device, 0x409800, 0x00000000); 1383 nvkm_wr32(device, 0x409500, 0x00000001); 1384 nvkm_wr32(device, 0x409504, 0x00000032); 1385 if (nvkm_msec(device, 2000, 1386 if (nvkm_rd32(device, 0x409800)) 1387 break; 1388 ) < 0) 1389 return -EBUSY; 1390 1391 nvkm_wr32(device, 0x409614, 0x00000070); 1392 nvkm_wr32(device, 0x409614, 0x00000770); 1393 nvkm_wr32(device, 0x40802c, 0x00000001); 1394 } 1395 1396 if (gr->data == NULL) { 1397 int ret = gf100_grctx_generate(gr); 1398 if (ret) { 1399 nvkm_error(subdev, "failed to construct context\n"); 1400 return ret; 1401 } 1402 } 1403 1404 return 0; 1405 } else 1406 if (!oclass->fecs.ucode) { 1407 return -ENOSYS; 1408 } 1409 1410 /* load HUB microcode */ 1411 nvkm_mc(gr)->unk260(nvkm_mc(gr), 0); 1412 nvkm_wr32(device, 0x4091c0, 0x01000000); 1413 for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++) 1414 nvkm_wr32(device, 0x4091c4, oclass->fecs.ucode->data.data[i]); 1415 1416 nvkm_wr32(device, 0x409180, 0x01000000); 1417 for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) { 1418 if ((i & 0x3f) == 0) 1419 nvkm_wr32(device, 0x409188, i >> 6); 1420 nvkm_wr32(device, 0x409184, oclass->fecs.ucode->code.data[i]); 1421 } 1422 1423 /* load GPC microcode */ 1424 nvkm_wr32(device, 0x41a1c0, 0x01000000); 1425 for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++) 1426 nvkm_wr32(device, 0x41a1c4, oclass->gpccs.ucode->data.data[i]); 1427 1428 nvkm_wr32(device, 0x41a180, 0x01000000); 1429 for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) { 1430 if ((i & 0x3f) == 0) 1431 nvkm_wr32(device, 0x41a188, i >> 6); 1432 nvkm_wr32(device, 0x41a184, oclass->gpccs.ucode->code.data[i]); 1433 } 1434 nvkm_mc(gr)->unk260(nvkm_mc(gr), 1); 1435 1436 /* load register lists */ 1437 gf100_gr_init_csdata(gr, cclass->hub, 0x409000, 0x000, 0x000000); 1438 gf100_gr_init_csdata(gr, cclass->gpc, 0x41a000, 0x000, 0x418000); 1439 gf100_gr_init_csdata(gr, cclass->tpc, 0x41a000, 0x004, 0x419800); 1440 gf100_gr_init_csdata(gr, cclass->ppc, 0x41a000, 0x008, 0x41be00); 1441 1442 /* start HUB ucode running, it'll init the GPCs */ 1443 nvkm_wr32(device, 0x40910c, 0x00000000); 1444 nvkm_wr32(device, 0x409100, 0x00000002); 1445 if (nvkm_msec(device, 2000, 1446 if (nvkm_rd32(device, 0x409800) & 0x80000000) 1447 break; 1448 ) < 0) { 1449 gf100_gr_ctxctl_debug(gr); 1450 return -EBUSY; 1451 } 1452 1453 gr->size = nvkm_rd32(device, 0x409804); 1454 if (gr->data == NULL) { 1455 int ret = gf100_grctx_generate(gr); 1456 if (ret) { 1457 nvkm_error(subdev, "failed to construct context\n"); 1458 return ret; 1459 } 1460 } 1461 1462 return 0; 1463 } 1464 1465 int 1466 gf100_gr_init(struct nvkm_object *object) 1467 { 1468 struct gf100_gr *gr = (void *)object; 1469 struct nvkm_device *device = gr->base.engine.subdev.device; 1470 struct gf100_gr_oclass *oclass = (void *)object->oclass; 1471 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 1472 u32 data[TPC_MAX / 8] = {}; 1473 u8 tpcnr[GPC_MAX]; 1474 int gpc, tpc, rop; 1475 int ret, i; 1476 1477 ret = nvkm_gr_init(&gr->base); 1478 if (ret) 1479 return ret; 1480 1481 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000); 1482 nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000); 1483 nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000); 1484 nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000); 1485 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000); 1486 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000); 1487 nvkm_wr32(device, GPC_BCAST(0x08b4), gr->unk4188b4->addr >> 8); 1488 nvkm_wr32(device, GPC_BCAST(0x08b8), gr->unk4188b8->addr >> 8); 1489 1490 gf100_gr_mmio(gr, oclass->mmio); 1491 1492 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1493 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1494 do { 1495 gpc = (gpc + 1) % gr->gpc_nr; 1496 } while (!tpcnr[gpc]); 1497 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; 1498 1499 data[i / 8] |= tpc << ((i % 8) * 4); 1500 } 1501 1502 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); 1503 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); 1504 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); 1505 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); 1506 1507 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 1508 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), 1509 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]); 1510 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | 1511 gr->tpc_total); 1512 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); 1513 } 1514 1515 if (nv_device(gr)->chipset != 0xd7) 1516 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918); 1517 else 1518 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); 1519 1520 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); 1521 1522 nvkm_wr32(device, 0x400500, 0x00010001); 1523 1524 nvkm_wr32(device, 0x400100, 0xffffffff); 1525 nvkm_wr32(device, 0x40013c, 0xffffffff); 1526 1527 nvkm_wr32(device, 0x409c24, 0x000f0000); 1528 nvkm_wr32(device, 0x404000, 0xc0000000); 1529 nvkm_wr32(device, 0x404600, 0xc0000000); 1530 nvkm_wr32(device, 0x408030, 0xc0000000); 1531 nvkm_wr32(device, 0x40601c, 0xc0000000); 1532 nvkm_wr32(device, 0x404490, 0xc0000000); 1533 nvkm_wr32(device, 0x406018, 0xc0000000); 1534 nvkm_wr32(device, 0x405840, 0xc0000000); 1535 nvkm_wr32(device, 0x405844, 0x00ffffff); 1536 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); 1537 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000); 1538 1539 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 1540 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 1541 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 1542 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); 1543 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); 1544 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 1545 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 1546 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 1547 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 1548 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 1549 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 1550 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); 1551 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); 1552 } 1553 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 1554 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 1555 } 1556 1557 for (rop = 0; rop < gr->rop_nr; rop++) { 1558 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); 1559 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); 1560 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); 1561 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); 1562 } 1563 1564 nvkm_wr32(device, 0x400108, 0xffffffff); 1565 nvkm_wr32(device, 0x400138, 0xffffffff); 1566 nvkm_wr32(device, 0x400118, 0xffffffff); 1567 nvkm_wr32(device, 0x400130, 0xffffffff); 1568 nvkm_wr32(device, 0x40011c, 0xffffffff); 1569 nvkm_wr32(device, 0x400134, 0xffffffff); 1570 1571 nvkm_wr32(device, 0x400054, 0x34ce3464); 1572 1573 gf100_gr_zbc_init(gr); 1574 1575 return gf100_gr_init_ctxctl(gr); 1576 } 1577 1578 void 1579 gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) 1580 { 1581 kfree(fuc->data); 1582 fuc->data = NULL; 1583 } 1584 1585 int 1586 gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname, 1587 struct gf100_gr_fuc *fuc) 1588 { 1589 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1590 struct nvkm_device *device = subdev->device; 1591 const struct firmware *fw; 1592 char f[64]; 1593 char cname[16]; 1594 int ret; 1595 int i; 1596 1597 /* Convert device name to lowercase */ 1598 strncpy(cname, device->cname, sizeof(cname)); 1599 cname[sizeof(cname) - 1] = '\0'; 1600 i = strlen(cname); 1601 while (i) { 1602 --i; 1603 cname[i] = tolower(cname[i]); 1604 } 1605 1606 snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); 1607 ret = request_firmware(&fw, f, nv_device_base(device)); 1608 if (ret) { 1609 nvkm_error(subdev, "failed to load %s\n", fwname); 1610 return ret; 1611 } 1612 1613 fuc->size = fw->size; 1614 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); 1615 release_firmware(fw); 1616 return (fuc->data != NULL) ? 0 : -ENOMEM; 1617 } 1618 1619 void 1620 gf100_gr_dtor(struct nvkm_object *object) 1621 { 1622 struct gf100_gr *gr = (void *)object; 1623 1624 kfree(gr->data); 1625 1626 gf100_gr_dtor_fw(&gr->fuc409c); 1627 gf100_gr_dtor_fw(&gr->fuc409d); 1628 gf100_gr_dtor_fw(&gr->fuc41ac); 1629 gf100_gr_dtor_fw(&gr->fuc41ad); 1630 1631 nvkm_gpuobj_ref(NULL, &gr->unk4188b8); 1632 nvkm_gpuobj_ref(NULL, &gr->unk4188b4); 1633 1634 nvkm_gr_destroy(&gr->base); 1635 } 1636 1637 int 1638 gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine, 1639 struct nvkm_oclass *bclass, void *data, u32 size, 1640 struct nvkm_object **pobject) 1641 { 1642 struct gf100_gr_oclass *oclass = (void *)bclass; 1643 struct nvkm_device *device = (void *)parent; 1644 struct gf100_gr *gr; 1645 bool use_ext_fw, enable; 1646 int ret, i, j; 1647 1648 use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW", 1649 oclass->fecs.ucode == NULL); 1650 enable = use_ext_fw || oclass->fecs.ucode != NULL; 1651 1652 ret = nvkm_gr_create(parent, engine, bclass, enable, &gr); 1653 *pobject = nv_object(gr); 1654 if (ret) 1655 return ret; 1656 1657 nv_subdev(gr)->unit = 0x08001000; 1658 nv_subdev(gr)->intr = gf100_gr_intr; 1659 1660 gr->base.units = gf100_gr_units; 1661 1662 if (use_ext_fw) { 1663 nvkm_info(&gr->base.engine.subdev, "using external firmware\n"); 1664 if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || 1665 gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || 1666 gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || 1667 gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) 1668 return -ENODEV; 1669 gr->firmware = true; 1670 } 1671 1672 ret = nvkm_gpuobj_new(nv_object(gr), NULL, 0x1000, 256, 0, 1673 &gr->unk4188b4); 1674 if (ret) 1675 return ret; 1676 1677 ret = nvkm_gpuobj_new(nv_object(gr), NULL, 0x1000, 256, 0, 1678 &gr->unk4188b8); 1679 if (ret) 1680 return ret; 1681 1682 for (i = 0; i < 0x1000; i += 4) { 1683 nv_wo32(gr->unk4188b4, i, 0x00000010); 1684 nv_wo32(gr->unk4188b8, i, 0x00000010); 1685 } 1686 1687 gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16; 1688 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f; 1689 for (i = 0; i < gr->gpc_nr; i++) { 1690 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608)); 1691 gr->tpc_total += gr->tpc_nr[i]; 1692 gr->ppc_nr[i] = oclass->ppc_nr; 1693 for (j = 0; j < gr->ppc_nr[i]; j++) { 1694 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); 1695 gr->ppc_tpc_nr[i][j] = hweight8(mask); 1696 } 1697 } 1698 1699 /*XXX: these need figuring out... though it might not even matter */ 1700 switch (nv_device(gr)->chipset) { 1701 case 0xc0: 1702 if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */ 1703 gr->magic_not_rop_nr = 0x07; 1704 } else 1705 if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */ 1706 gr->magic_not_rop_nr = 0x05; 1707 } else 1708 if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */ 1709 gr->magic_not_rop_nr = 0x06; 1710 } 1711 break; 1712 case 0xc3: /* 450, 4/0/0/0, 2 */ 1713 gr->magic_not_rop_nr = 0x03; 1714 break; 1715 case 0xc4: /* 460, 3/4/0/0, 4 */ 1716 gr->magic_not_rop_nr = 0x01; 1717 break; 1718 case 0xc1: /* 2/0/0/0, 1 */ 1719 gr->magic_not_rop_nr = 0x01; 1720 break; 1721 case 0xc8: /* 4/4/3/4, 5 */ 1722 gr->magic_not_rop_nr = 0x06; 1723 break; 1724 case 0xce: /* 4/4/0/0, 4 */ 1725 gr->magic_not_rop_nr = 0x03; 1726 break; 1727 case 0xcf: /* 4/0/0/0, 3 */ 1728 gr->magic_not_rop_nr = 0x03; 1729 break; 1730 case 0xd7: 1731 case 0xd9: /* 1/0/0/0, 1 */ 1732 case 0xea: /* gk20a */ 1733 case 0x12b: /* gm20b */ 1734 gr->magic_not_rop_nr = 0x01; 1735 break; 1736 } 1737 1738 nv_engine(gr)->cclass = *oclass->cclass; 1739 nv_engine(gr)->sclass = oclass->sclass; 1740 return 0; 1741 } 1742 1743 #include "fuc/hubgf100.fuc3.h" 1744 1745 struct gf100_gr_ucode 1746 gf100_gr_fecs_ucode = { 1747 .code.data = gf100_grhub_code, 1748 .code.size = sizeof(gf100_grhub_code), 1749 .data.data = gf100_grhub_data, 1750 .data.size = sizeof(gf100_grhub_data), 1751 }; 1752 1753 #include "fuc/gpcgf100.fuc3.h" 1754 1755 struct gf100_gr_ucode 1756 gf100_gr_gpccs_ucode = { 1757 .code.data = gf100_grgpc_code, 1758 .code.size = sizeof(gf100_grgpc_code), 1759 .data.data = gf100_grgpc_data, 1760 .data.size = sizeof(gf100_grgpc_data), 1761 }; 1762 1763 struct nvkm_oclass * 1764 gf100_gr_oclass = &(struct gf100_gr_oclass) { 1765 .base.handle = NV_ENGINE(GR, 0xc0), 1766 .base.ofuncs = &(struct nvkm_ofuncs) { 1767 .ctor = gf100_gr_ctor, 1768 .dtor = gf100_gr_dtor, 1769 .init = gf100_gr_init, 1770 .fini = _nvkm_gr_fini, 1771 }, 1772 .cclass = &gf100_grctx_oclass, 1773 .sclass = gf100_gr_sclass, 1774 .mmio = gf100_gr_pack_mmio, 1775 .fecs.ucode = &gf100_gr_fecs_ucode, 1776 .gpccs.ucode = &gf100_gr_gpccs_ucode, 1777 }.base; 1778