1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "gf100.h" 25 #include "ctxgf100.h" 26 #include "fuc/os.h" 27 28 #include <core/client.h> 29 #include <core/option.h> 30 #include <subdev/fb.h> 31 #include <subdev/mc.h> 32 #include <subdev/pmu.h> 33 #include <subdev/timer.h> 34 #include <engine/fifo.h> 35 36 #include <nvif/class.h> 37 #include <nvif/unpack.h> 38 39 /******************************************************************************* 40 * Zero Bandwidth Clear 41 ******************************************************************************/ 42 43 static void 44 gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc) 45 { 46 struct nvkm_device *device = gr->base.engine.subdev.device; 47 if (gr->zbc_color[zbc].format) { 48 nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]); 49 nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]); 50 nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]); 51 nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]); 52 } 53 nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format); 54 nvkm_wr32(device, 0x405820, zbc); 55 nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ 56 } 57 58 static int 59 gf100_gr_zbc_color_get(struct gf100_gr *gr, int format, 60 const u32 ds[4], const u32 l2[4]) 61 { 62 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 63 int zbc = -ENOSPC, i; 64 65 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 66 if (gr->zbc_color[i].format) { 67 if (gr->zbc_color[i].format != format) 68 continue; 69 if (memcmp(gr->zbc_color[i].ds, ds, sizeof( 70 gr->zbc_color[i].ds))) 71 continue; 72 if (memcmp(gr->zbc_color[i].l2, l2, sizeof( 73 gr->zbc_color[i].l2))) { 74 WARN_ON(1); 75 return -EINVAL; 76 } 77 return i; 78 } else { 79 zbc = (zbc < 0) ? i : zbc; 80 } 81 } 82 83 if (zbc < 0) 84 return zbc; 85 86 memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds)); 87 memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2)); 88 gr->zbc_color[zbc].format = format; 89 nvkm_ltc_zbc_color_get(ltc, zbc, l2); 90 gf100_gr_zbc_clear_color(gr, zbc); 91 return zbc; 92 } 93 94 static void 95 gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc) 96 { 97 struct nvkm_device *device = gr->base.engine.subdev.device; 98 if (gr->zbc_depth[zbc].format) 99 nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds); 100 nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format); 101 nvkm_wr32(device, 0x405820, zbc); 102 nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ 103 } 104 105 static int 106 gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, 107 const u32 ds, const u32 l2) 108 { 109 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 110 int zbc = -ENOSPC, i; 111 112 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 113 if (gr->zbc_depth[i].format) { 114 if (gr->zbc_depth[i].format != format) 115 continue; 116 if (gr->zbc_depth[i].ds != ds) 117 continue; 118 if (gr->zbc_depth[i].l2 != l2) { 119 WARN_ON(1); 120 return -EINVAL; 121 } 122 return i; 123 } else { 124 zbc = (zbc < 0) ? i : zbc; 125 } 126 } 127 128 if (zbc < 0) 129 return zbc; 130 131 gr->zbc_depth[zbc].format = format; 132 gr->zbc_depth[zbc].ds = ds; 133 gr->zbc_depth[zbc].l2 = l2; 134 nvkm_ltc_zbc_depth_get(ltc, zbc, l2); 135 gf100_gr_zbc_clear_depth(gr, zbc); 136 return zbc; 137 } 138 139 /******************************************************************************* 140 * Graphics object classes 141 ******************************************************************************/ 142 143 static int 144 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) 145 { 146 struct gf100_gr *gr = (void *)object->engine; 147 union { 148 struct fermi_a_zbc_color_v0 v0; 149 } *args = data; 150 int ret; 151 152 if (nvif_unpack(args->v0, 0, 0, false)) { 153 switch (args->v0.format) { 154 case FERMI_A_ZBC_COLOR_V0_FMT_ZERO: 155 case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE: 156 case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32: 157 case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16: 158 case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16: 159 case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16: 160 case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16: 161 case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16: 162 case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8: 163 case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8: 164 case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10: 165 case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10: 166 case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8: 167 case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8: 168 case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8: 169 case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8: 170 case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8: 171 case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10: 172 case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11: 173 ret = gf100_gr_zbc_color_get(gr, args->v0.format, 174 args->v0.ds, 175 args->v0.l2); 176 if (ret >= 0) { 177 args->v0.index = ret; 178 return 0; 179 } 180 break; 181 default: 182 return -EINVAL; 183 } 184 } 185 186 return ret; 187 } 188 189 static int 190 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) 191 { 192 struct gf100_gr *gr = (void *)object->engine; 193 union { 194 struct fermi_a_zbc_depth_v0 v0; 195 } *args = data; 196 int ret; 197 198 if (nvif_unpack(args->v0, 0, 0, false)) { 199 switch (args->v0.format) { 200 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: 201 ret = gf100_gr_zbc_depth_get(gr, args->v0.format, 202 args->v0.ds, 203 args->v0.l2); 204 return (ret >= 0) ? 0 : -ENOSPC; 205 default: 206 return -EINVAL; 207 } 208 } 209 210 return ret; 211 } 212 213 static int 214 gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 215 { 216 switch (mthd) { 217 case FERMI_A_ZBC_COLOR: 218 return gf100_fermi_mthd_zbc_color(object, data, size); 219 case FERMI_A_ZBC_DEPTH: 220 return gf100_fermi_mthd_zbc_depth(object, data, size); 221 default: 222 break; 223 } 224 return -EINVAL; 225 } 226 227 const struct nvkm_object_func 228 gf100_fermi = { 229 .mthd = gf100_fermi_mthd, 230 }; 231 232 static void 233 gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data) 234 { 235 nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000); 236 nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000); 237 } 238 239 static bool 240 gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data) 241 { 242 switch (class & 0x00ff) { 243 case 0x97: 244 case 0xc0: 245 switch (mthd) { 246 case 0x1528: 247 gf100_gr_mthd_set_shader_exceptions(device, data); 248 return true; 249 default: 250 break; 251 } 252 break; 253 default: 254 break; 255 } 256 return false; 257 } 258 259 static int 260 gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass) 261 { 262 struct gf100_gr *gr = gf100_gr(base); 263 int c = 0; 264 265 while (gr->func->sclass[c].oclass) { 266 if (c++ == index) { 267 *sclass = gr->func->sclass[index]; 268 return index; 269 } 270 } 271 272 return c; 273 } 274 275 /******************************************************************************* 276 * PGRAPH context 277 ******************************************************************************/ 278 279 static int 280 gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, 281 int align, struct nvkm_gpuobj **pgpuobj) 282 { 283 struct gf100_gr_chan *chan = gf100_gr_chan(object); 284 struct gf100_gr *gr = chan->gr; 285 int ret, i; 286 287 ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, 288 align, false, parent, pgpuobj); 289 if (ret) 290 return ret; 291 292 nvkm_kmap(*pgpuobj); 293 for (i = 0; i < gr->size; i += 4) 294 nvkm_wo32(*pgpuobj, i, gr->data[i / 4]); 295 296 if (!gr->firmware) { 297 nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2); 298 nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8); 299 } else { 300 nvkm_wo32(*pgpuobj, 0xf4, 0); 301 nvkm_wo32(*pgpuobj, 0xf8, 0); 302 nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2); 303 nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset)); 304 nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset)); 305 nvkm_wo32(*pgpuobj, 0x1c, 1); 306 nvkm_wo32(*pgpuobj, 0x20, 0); 307 nvkm_wo32(*pgpuobj, 0x28, 0); 308 nvkm_wo32(*pgpuobj, 0x2c, 0); 309 } 310 nvkm_done(*pgpuobj); 311 return 0; 312 } 313 314 static void * 315 gf100_gr_chan_dtor(struct nvkm_object *object) 316 { 317 struct gf100_gr_chan *chan = gf100_gr_chan(object); 318 int i; 319 320 for (i = 0; i < ARRAY_SIZE(chan->data); i++) { 321 if (chan->data[i].vma.node) { 322 nvkm_vm_unmap(&chan->data[i].vma); 323 nvkm_vm_put(&chan->data[i].vma); 324 } 325 nvkm_memory_del(&chan->data[i].mem); 326 } 327 328 if (chan->mmio_vma.node) { 329 nvkm_vm_unmap(&chan->mmio_vma); 330 nvkm_vm_put(&chan->mmio_vma); 331 } 332 nvkm_memory_del(&chan->mmio); 333 return chan; 334 } 335 336 static const struct nvkm_object_func 337 gf100_gr_chan = { 338 .dtor = gf100_gr_chan_dtor, 339 .bind = gf100_gr_chan_bind, 340 }; 341 342 static int 343 gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, 344 const struct nvkm_oclass *oclass, 345 struct nvkm_object **pobject) 346 { 347 struct gf100_gr *gr = gf100_gr(base); 348 struct gf100_gr_data *data = gr->mmio_data; 349 struct gf100_gr_mmio *mmio = gr->mmio_list; 350 struct gf100_gr_chan *chan; 351 struct nvkm_device *device = gr->base.engine.subdev.device; 352 int ret, i; 353 354 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) 355 return -ENOMEM; 356 nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object); 357 chan->gr = gr; 358 *pobject = &chan->object; 359 360 /* allocate memory for a "mmio list" buffer that's used by the HUB 361 * fuc to modify some per-context register settings on first load 362 * of the context. 363 */ 364 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100, 365 false, &chan->mmio); 366 if (ret) 367 return ret; 368 369 ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW | 370 NV_MEM_ACCESS_SYS, &chan->mmio_vma); 371 if (ret) 372 return ret; 373 374 nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0); 375 376 /* allocate buffers referenced by mmio list */ 377 for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) { 378 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 379 data->size, data->align, false, 380 &chan->data[i].mem); 381 if (ret) 382 return ret; 383 384 ret = nvkm_vm_get(fifoch->vm, 385 nvkm_memory_size(chan->data[i].mem), 12, 386 data->access, &chan->data[i].vma); 387 if (ret) 388 return ret; 389 390 nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0); 391 data++; 392 } 393 394 /* finally, fill in the mmio list and point the context at it */ 395 nvkm_kmap(chan->mmio); 396 for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) { 397 u32 addr = mmio->addr; 398 u32 data = mmio->data; 399 400 if (mmio->buffer >= 0) { 401 u64 info = chan->data[mmio->buffer].vma.offset; 402 data |= info >> mmio->shift; 403 } 404 405 nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr); 406 nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data); 407 mmio++; 408 } 409 nvkm_done(chan->mmio); 410 return 0; 411 } 412 413 /******************************************************************************* 414 * PGRAPH register lists 415 ******************************************************************************/ 416 417 const struct gf100_gr_init 418 gf100_gr_init_main_0[] = { 419 { 0x400080, 1, 0x04, 0x003083c2 }, 420 { 0x400088, 1, 0x04, 0x00006fe7 }, 421 { 0x40008c, 1, 0x04, 0x00000000 }, 422 { 0x400090, 1, 0x04, 0x00000030 }, 423 { 0x40013c, 1, 0x04, 0x013901f7 }, 424 { 0x400140, 1, 0x04, 0x00000100 }, 425 { 0x400144, 1, 0x04, 0x00000000 }, 426 { 0x400148, 1, 0x04, 0x00000110 }, 427 { 0x400138, 1, 0x04, 0x00000000 }, 428 { 0x400130, 2, 0x04, 0x00000000 }, 429 { 0x400124, 1, 0x04, 0x00000002 }, 430 {} 431 }; 432 433 const struct gf100_gr_init 434 gf100_gr_init_fe_0[] = { 435 { 0x40415c, 1, 0x04, 0x00000000 }, 436 { 0x404170, 1, 0x04, 0x00000000 }, 437 {} 438 }; 439 440 const struct gf100_gr_init 441 gf100_gr_init_pri_0[] = { 442 { 0x404488, 2, 0x04, 0x00000000 }, 443 {} 444 }; 445 446 const struct gf100_gr_init 447 gf100_gr_init_rstr2d_0[] = { 448 { 0x407808, 1, 0x04, 0x00000000 }, 449 {} 450 }; 451 452 const struct gf100_gr_init 453 gf100_gr_init_pd_0[] = { 454 { 0x406024, 1, 0x04, 0x00000000 }, 455 {} 456 }; 457 458 const struct gf100_gr_init 459 gf100_gr_init_ds_0[] = { 460 { 0x405844, 1, 0x04, 0x00ffffff }, 461 { 0x405850, 1, 0x04, 0x00000000 }, 462 { 0x405908, 1, 0x04, 0x00000000 }, 463 {} 464 }; 465 466 const struct gf100_gr_init 467 gf100_gr_init_scc_0[] = { 468 { 0x40803c, 1, 0x04, 0x00000000 }, 469 {} 470 }; 471 472 const struct gf100_gr_init 473 gf100_gr_init_prop_0[] = { 474 { 0x4184a0, 1, 0x04, 0x00000000 }, 475 {} 476 }; 477 478 const struct gf100_gr_init 479 gf100_gr_init_gpc_unk_0[] = { 480 { 0x418604, 1, 0x04, 0x00000000 }, 481 { 0x418680, 1, 0x04, 0x00000000 }, 482 { 0x418714, 1, 0x04, 0x80000000 }, 483 { 0x418384, 1, 0x04, 0x00000000 }, 484 {} 485 }; 486 487 const struct gf100_gr_init 488 gf100_gr_init_setup_0[] = { 489 { 0x418814, 3, 0x04, 0x00000000 }, 490 {} 491 }; 492 493 const struct gf100_gr_init 494 gf100_gr_init_crstr_0[] = { 495 { 0x418b04, 1, 0x04, 0x00000000 }, 496 {} 497 }; 498 499 const struct gf100_gr_init 500 gf100_gr_init_setup_1[] = { 501 { 0x4188c8, 1, 0x04, 0x80000000 }, 502 { 0x4188cc, 1, 0x04, 0x00000000 }, 503 { 0x4188d0, 1, 0x04, 0x00010000 }, 504 { 0x4188d4, 1, 0x04, 0x00000001 }, 505 {} 506 }; 507 508 const struct gf100_gr_init 509 gf100_gr_init_zcull_0[] = { 510 { 0x418910, 1, 0x04, 0x00010001 }, 511 { 0x418914, 1, 0x04, 0x00000301 }, 512 { 0x418918, 1, 0x04, 0x00800000 }, 513 { 0x418980, 1, 0x04, 0x77777770 }, 514 { 0x418984, 3, 0x04, 0x77777777 }, 515 {} 516 }; 517 518 const struct gf100_gr_init 519 gf100_gr_init_gpm_0[] = { 520 { 0x418c04, 1, 0x04, 0x00000000 }, 521 { 0x418c88, 1, 0x04, 0x00000000 }, 522 {} 523 }; 524 525 const struct gf100_gr_init 526 gf100_gr_init_gpc_unk_1[] = { 527 { 0x418d00, 1, 0x04, 0x00000000 }, 528 { 0x418f08, 1, 0x04, 0x00000000 }, 529 { 0x418e00, 1, 0x04, 0x00000050 }, 530 { 0x418e08, 1, 0x04, 0x00000000 }, 531 {} 532 }; 533 534 const struct gf100_gr_init 535 gf100_gr_init_gcc_0[] = { 536 { 0x41900c, 1, 0x04, 0x00000000 }, 537 { 0x419018, 1, 0x04, 0x00000000 }, 538 {} 539 }; 540 541 const struct gf100_gr_init 542 gf100_gr_init_tpccs_0[] = { 543 { 0x419d08, 2, 0x04, 0x00000000 }, 544 { 0x419d10, 1, 0x04, 0x00000014 }, 545 {} 546 }; 547 548 const struct gf100_gr_init 549 gf100_gr_init_tex_0[] = { 550 { 0x419ab0, 1, 0x04, 0x00000000 }, 551 { 0x419ab8, 1, 0x04, 0x000000e7 }, 552 { 0x419abc, 2, 0x04, 0x00000000 }, 553 {} 554 }; 555 556 const struct gf100_gr_init 557 gf100_gr_init_pe_0[] = { 558 { 0x41980c, 3, 0x04, 0x00000000 }, 559 { 0x419844, 1, 0x04, 0x00000000 }, 560 { 0x41984c, 1, 0x04, 0x00005bc5 }, 561 { 0x419850, 4, 0x04, 0x00000000 }, 562 {} 563 }; 564 565 const struct gf100_gr_init 566 gf100_gr_init_l1c_0[] = { 567 { 0x419c98, 1, 0x04, 0x00000000 }, 568 { 0x419ca8, 1, 0x04, 0x80000000 }, 569 { 0x419cb4, 1, 0x04, 0x00000000 }, 570 { 0x419cb8, 1, 0x04, 0x00008bf4 }, 571 { 0x419cbc, 1, 0x04, 0x28137606 }, 572 { 0x419cc0, 2, 0x04, 0x00000000 }, 573 {} 574 }; 575 576 const struct gf100_gr_init 577 gf100_gr_init_wwdx_0[] = { 578 { 0x419bd4, 1, 0x04, 0x00800000 }, 579 { 0x419bdc, 1, 0x04, 0x00000000 }, 580 {} 581 }; 582 583 const struct gf100_gr_init 584 gf100_gr_init_tpccs_1[] = { 585 { 0x419d2c, 1, 0x04, 0x00000000 }, 586 {} 587 }; 588 589 const struct gf100_gr_init 590 gf100_gr_init_mpc_0[] = { 591 { 0x419c0c, 1, 0x04, 0x00000000 }, 592 {} 593 }; 594 595 static const struct gf100_gr_init 596 gf100_gr_init_sm_0[] = { 597 { 0x419e00, 1, 0x04, 0x00000000 }, 598 { 0x419ea0, 1, 0x04, 0x00000000 }, 599 { 0x419ea4, 1, 0x04, 0x00000100 }, 600 { 0x419ea8, 1, 0x04, 0x00001100 }, 601 { 0x419eac, 1, 0x04, 0x11100702 }, 602 { 0x419eb0, 1, 0x04, 0x00000003 }, 603 { 0x419eb4, 4, 0x04, 0x00000000 }, 604 { 0x419ec8, 1, 0x04, 0x06060618 }, 605 { 0x419ed0, 1, 0x04, 0x0eff0e38 }, 606 { 0x419ed4, 1, 0x04, 0x011104f1 }, 607 { 0x419edc, 1, 0x04, 0x00000000 }, 608 { 0x419f00, 1, 0x04, 0x00000000 }, 609 { 0x419f2c, 1, 0x04, 0x00000000 }, 610 {} 611 }; 612 613 const struct gf100_gr_init 614 gf100_gr_init_be_0[] = { 615 { 0x40880c, 1, 0x04, 0x00000000 }, 616 { 0x408910, 9, 0x04, 0x00000000 }, 617 { 0x408950, 1, 0x04, 0x00000000 }, 618 { 0x408954, 1, 0x04, 0x0000ffff }, 619 { 0x408984, 1, 0x04, 0x00000000 }, 620 { 0x408988, 1, 0x04, 0x08040201 }, 621 { 0x40898c, 1, 0x04, 0x80402010 }, 622 {} 623 }; 624 625 const struct gf100_gr_init 626 gf100_gr_init_fe_1[] = { 627 { 0x4040f0, 1, 0x04, 0x00000000 }, 628 {} 629 }; 630 631 const struct gf100_gr_init 632 gf100_gr_init_pe_1[] = { 633 { 0x419880, 1, 0x04, 0x00000002 }, 634 {} 635 }; 636 637 static const struct gf100_gr_pack 638 gf100_gr_pack_mmio[] = { 639 { gf100_gr_init_main_0 }, 640 { gf100_gr_init_fe_0 }, 641 { gf100_gr_init_pri_0 }, 642 { gf100_gr_init_rstr2d_0 }, 643 { gf100_gr_init_pd_0 }, 644 { gf100_gr_init_ds_0 }, 645 { gf100_gr_init_scc_0 }, 646 { gf100_gr_init_prop_0 }, 647 { gf100_gr_init_gpc_unk_0 }, 648 { gf100_gr_init_setup_0 }, 649 { gf100_gr_init_crstr_0 }, 650 { gf100_gr_init_setup_1 }, 651 { gf100_gr_init_zcull_0 }, 652 { gf100_gr_init_gpm_0 }, 653 { gf100_gr_init_gpc_unk_1 }, 654 { gf100_gr_init_gcc_0 }, 655 { gf100_gr_init_tpccs_0 }, 656 { gf100_gr_init_tex_0 }, 657 { gf100_gr_init_pe_0 }, 658 { gf100_gr_init_l1c_0 }, 659 { gf100_gr_init_wwdx_0 }, 660 { gf100_gr_init_tpccs_1 }, 661 { gf100_gr_init_mpc_0 }, 662 { gf100_gr_init_sm_0 }, 663 { gf100_gr_init_be_0 }, 664 { gf100_gr_init_fe_1 }, 665 { gf100_gr_init_pe_1 }, 666 {} 667 }; 668 669 /******************************************************************************* 670 * PGRAPH engine/subdev functions 671 ******************************************************************************/ 672 673 void 674 gf100_gr_zbc_init(struct gf100_gr *gr) 675 { 676 const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 677 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 678 const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 679 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; 680 const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 681 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 682 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 683 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; 684 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 685 int index; 686 687 if (!gr->zbc_color[0].format) { 688 gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); 689 gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); 690 gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); 691 gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); 692 gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); 693 gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); 694 } 695 696 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 697 gf100_gr_zbc_clear_color(gr, index); 698 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) 699 gf100_gr_zbc_clear_depth(gr, index); 700 } 701 702 /** 703 * Wait until GR goes idle. GR is considered idle if it is disabled by the 704 * MC (0x200) register, or GR is not busy and a context switch is not in 705 * progress. 706 */ 707 int 708 gf100_gr_wait_idle(struct gf100_gr *gr) 709 { 710 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 711 struct nvkm_device *device = subdev->device; 712 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000); 713 bool gr_enabled, ctxsw_active, gr_busy; 714 715 do { 716 /* 717 * required to make sure FIFO_ENGINE_STATUS (0x2640) is 718 * up-to-date 719 */ 720 nvkm_rd32(device, 0x400700); 721 722 gr_enabled = nvkm_rd32(device, 0x200) & 0x1000; 723 ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000; 724 gr_busy = nvkm_rd32(device, 0x40060c) & 0x1; 725 726 if (!gr_enabled || (!gr_busy && !ctxsw_active)) 727 return 0; 728 } while (time_before(jiffies, end_jiffies)); 729 730 nvkm_error(subdev, 731 "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n", 732 gr_enabled, ctxsw_active, gr_busy); 733 return -EAGAIN; 734 } 735 736 void 737 gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p) 738 { 739 struct nvkm_device *device = gr->base.engine.subdev.device; 740 const struct gf100_gr_pack *pack; 741 const struct gf100_gr_init *init; 742 743 pack_for_each_init(init, pack, p) { 744 u32 next = init->addr + init->count * init->pitch; 745 u32 addr = init->addr; 746 while (addr < next) { 747 nvkm_wr32(device, addr, init->data); 748 addr += init->pitch; 749 } 750 } 751 } 752 753 void 754 gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p) 755 { 756 struct nvkm_device *device = gr->base.engine.subdev.device; 757 const struct gf100_gr_pack *pack; 758 const struct gf100_gr_init *init; 759 u32 data = 0; 760 761 nvkm_wr32(device, 0x400208, 0x80000000); 762 763 pack_for_each_init(init, pack, p) { 764 u32 next = init->addr + init->count * init->pitch; 765 u32 addr = init->addr; 766 767 if ((pack == p && init == p->init) || data != init->data) { 768 nvkm_wr32(device, 0x400204, init->data); 769 data = init->data; 770 } 771 772 while (addr < next) { 773 nvkm_wr32(device, 0x400200, addr); 774 /** 775 * Wait for GR to go idle after submitting a 776 * GO_IDLE bundle 777 */ 778 if ((addr & 0xffff) == 0xe100) 779 gf100_gr_wait_idle(gr); 780 nvkm_msec(device, 2000, 781 if (!(nvkm_rd32(device, 0x400700) & 0x00000004)) 782 break; 783 ); 784 addr += init->pitch; 785 } 786 } 787 788 nvkm_wr32(device, 0x400208, 0x00000000); 789 } 790 791 void 792 gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p) 793 { 794 struct nvkm_device *device = gr->base.engine.subdev.device; 795 const struct gf100_gr_pack *pack; 796 const struct gf100_gr_init *init; 797 u32 data = 0; 798 799 pack_for_each_init(init, pack, p) { 800 u32 ctrl = 0x80000000 | pack->type; 801 u32 next = init->addr + init->count * init->pitch; 802 u32 addr = init->addr; 803 804 if ((pack == p && init == p->init) || data != init->data) { 805 nvkm_wr32(device, 0x40448c, init->data); 806 data = init->data; 807 } 808 809 while (addr < next) { 810 nvkm_wr32(device, 0x404488, ctrl | (addr << 14)); 811 addr += init->pitch; 812 } 813 } 814 } 815 816 u64 817 gf100_gr_units(struct nvkm_gr *base) 818 { 819 struct gf100_gr *gr = gf100_gr(base); 820 u64 cfg; 821 822 cfg = (u32)gr->gpc_nr; 823 cfg |= (u32)gr->tpc_total << 8; 824 cfg |= (u64)gr->rop_nr << 32; 825 826 return cfg; 827 } 828 829 static const struct nvkm_bitfield gk104_sked_error[] = { 830 { 0x00000080, "CONSTANT_BUFFER_SIZE" }, 831 { 0x00000200, "LOCAL_MEMORY_SIZE_POS" }, 832 { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" }, 833 { 0x00000800, "WARP_CSTACK_SIZE" }, 834 { 0x00001000, "TOTAL_TEMP_SIZE" }, 835 { 0x00002000, "REGISTER_COUNT" }, 836 { 0x00040000, "TOTAL_THREADS" }, 837 { 0x00100000, "PROGRAM_OFFSET" }, 838 { 0x00200000, "SHARED_MEMORY_SIZE" }, 839 { 0x02000000, "SHARED_CONFIG_TOO_SMALL" }, 840 { 0x04000000, "TOTAL_REGISTER_COUNT" }, 841 {} 842 }; 843 844 static const struct nvkm_bitfield gf100_gpc_rop_error[] = { 845 { 0x00000002, "RT_PITCH_OVERRUN" }, 846 { 0x00000010, "RT_WIDTH_OVERRUN" }, 847 { 0x00000020, "RT_HEIGHT_OVERRUN" }, 848 { 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" }, 849 { 0x00000100, "RT_STORAGE_TYPE_MISMATCH" }, 850 { 0x00000400, "RT_LINEAR_MISMATCH" }, 851 {} 852 }; 853 854 static void 855 gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) 856 { 857 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 858 struct nvkm_device *device = subdev->device; 859 char error[128]; 860 u32 trap[4]; 861 862 trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff; 863 trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434)); 864 trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438)); 865 trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c)); 866 867 nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]); 868 869 nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, " 870 "format = %x, storage type = %x\n", 871 gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16, 872 (trap[2] >> 8) & 0x3f, trap[3] & 0xff); 873 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 874 } 875 876 static const struct nvkm_enum gf100_mp_warp_error[] = { 877 { 0x00, "NO_ERROR" }, 878 { 0x01, "STACK_MISMATCH" }, 879 { 0x05, "MISALIGNED_PC" }, 880 { 0x08, "MISALIGNED_GPR" }, 881 { 0x09, "INVALID_OPCODE" }, 882 { 0x0d, "GPR_OUT_OF_BOUNDS" }, 883 { 0x0e, "MEM_OUT_OF_BOUNDS" }, 884 { 0x0f, "UNALIGNED_MEM_ACCESS" }, 885 { 0x11, "INVALID_PARAM" }, 886 {} 887 }; 888 889 static const struct nvkm_bitfield gf100_mp_global_error[] = { 890 { 0x00000004, "MULTIPLE_WARP_ERRORS" }, 891 { 0x00000008, "OUT_OF_STACK_SPACE" }, 892 {} 893 }; 894 895 static void 896 gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) 897 { 898 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 899 struct nvkm_device *device = subdev->device; 900 u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648)); 901 u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650)); 902 const struct nvkm_enum *warp; 903 char glob[128]; 904 905 nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr); 906 warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff); 907 908 nvkm_error(subdev, "GPC%i/TPC%i/MP trap: " 909 "global %08x [%s] warp %04x [%s]\n", 910 gpc, tpc, gerr, glob, werr, warp ? warp->name : ""); 911 912 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000); 913 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr); 914 } 915 916 static void 917 gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc) 918 { 919 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 920 struct nvkm_device *device = subdev->device; 921 u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508)); 922 923 if (stat & 0x00000001) { 924 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224)); 925 nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap); 926 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000); 927 stat &= ~0x00000001; 928 } 929 930 if (stat & 0x00000002) { 931 gf100_gr_trap_mp(gr, gpc, tpc); 932 stat &= ~0x00000002; 933 } 934 935 if (stat & 0x00000004) { 936 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084)); 937 nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap); 938 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000); 939 stat &= ~0x00000004; 940 } 941 942 if (stat & 0x00000008) { 943 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c)); 944 nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap); 945 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000); 946 stat &= ~0x00000008; 947 } 948 949 if (stat) { 950 nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat); 951 } 952 } 953 954 static void 955 gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc) 956 { 957 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 958 struct nvkm_device *device = subdev->device; 959 u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90)); 960 int tpc; 961 962 if (stat & 0x00000001) { 963 gf100_gr_trap_gpc_rop(gr, gpc); 964 stat &= ~0x00000001; 965 } 966 967 if (stat & 0x00000002) { 968 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900)); 969 nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap); 970 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 971 stat &= ~0x00000002; 972 } 973 974 if (stat & 0x00000004) { 975 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028)); 976 nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap); 977 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); 978 stat &= ~0x00000004; 979 } 980 981 if (stat & 0x00000008) { 982 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824)); 983 nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap); 984 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); 985 stat &= ~0x00000009; 986 } 987 988 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 989 u32 mask = 0x00010000 << tpc; 990 if (stat & mask) { 991 gf100_gr_trap_tpc(gr, gpc, tpc); 992 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask); 993 stat &= ~mask; 994 } 995 } 996 997 if (stat) { 998 nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat); 999 } 1000 } 1001 1002 static void 1003 gf100_gr_trap_intr(struct gf100_gr *gr) 1004 { 1005 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1006 struct nvkm_device *device = subdev->device; 1007 u32 trap = nvkm_rd32(device, 0x400108); 1008 int rop, gpc; 1009 1010 if (trap & 0x00000001) { 1011 u32 stat = nvkm_rd32(device, 0x404000); 1012 nvkm_error(subdev, "DISPATCH %08x\n", stat); 1013 nvkm_wr32(device, 0x404000, 0xc0000000); 1014 nvkm_wr32(device, 0x400108, 0x00000001); 1015 trap &= ~0x00000001; 1016 } 1017 1018 if (trap & 0x00000002) { 1019 u32 stat = nvkm_rd32(device, 0x404600); 1020 nvkm_error(subdev, "M2MF %08x\n", stat); 1021 nvkm_wr32(device, 0x404600, 0xc0000000); 1022 nvkm_wr32(device, 0x400108, 0x00000002); 1023 trap &= ~0x00000002; 1024 } 1025 1026 if (trap & 0x00000008) { 1027 u32 stat = nvkm_rd32(device, 0x408030); 1028 nvkm_error(subdev, "CCACHE %08x\n", stat); 1029 nvkm_wr32(device, 0x408030, 0xc0000000); 1030 nvkm_wr32(device, 0x400108, 0x00000008); 1031 trap &= ~0x00000008; 1032 } 1033 1034 if (trap & 0x00000010) { 1035 u32 stat = nvkm_rd32(device, 0x405840); 1036 nvkm_error(subdev, "SHADER %08x\n", stat); 1037 nvkm_wr32(device, 0x405840, 0xc0000000); 1038 nvkm_wr32(device, 0x400108, 0x00000010); 1039 trap &= ~0x00000010; 1040 } 1041 1042 if (trap & 0x00000040) { 1043 u32 stat = nvkm_rd32(device, 0x40601c); 1044 nvkm_error(subdev, "UNK6 %08x\n", stat); 1045 nvkm_wr32(device, 0x40601c, 0xc0000000); 1046 nvkm_wr32(device, 0x400108, 0x00000040); 1047 trap &= ~0x00000040; 1048 } 1049 1050 if (trap & 0x00000080) { 1051 u32 stat = nvkm_rd32(device, 0x404490); 1052 nvkm_error(subdev, "MACRO %08x\n", stat); 1053 nvkm_wr32(device, 0x404490, 0xc0000000); 1054 nvkm_wr32(device, 0x400108, 0x00000080); 1055 trap &= ~0x00000080; 1056 } 1057 1058 if (trap & 0x00000100) { 1059 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff; 1060 char sked[128]; 1061 1062 nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat); 1063 nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked); 1064 1065 if (stat) 1066 nvkm_wr32(device, 0x407020, 0x40000000); 1067 nvkm_wr32(device, 0x400108, 0x00000100); 1068 trap &= ~0x00000100; 1069 } 1070 1071 if (trap & 0x01000000) { 1072 u32 stat = nvkm_rd32(device, 0x400118); 1073 for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) { 1074 u32 mask = 0x00000001 << gpc; 1075 if (stat & mask) { 1076 gf100_gr_trap_gpc(gr, gpc); 1077 nvkm_wr32(device, 0x400118, mask); 1078 stat &= ~mask; 1079 } 1080 } 1081 nvkm_wr32(device, 0x400108, 0x01000000); 1082 trap &= ~0x01000000; 1083 } 1084 1085 if (trap & 0x02000000) { 1086 for (rop = 0; rop < gr->rop_nr; rop++) { 1087 u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070)); 1088 u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144)); 1089 nvkm_error(subdev, "ROP%d %08x %08x\n", 1090 rop, statz, statc); 1091 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); 1092 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); 1093 } 1094 nvkm_wr32(device, 0x400108, 0x02000000); 1095 trap &= ~0x02000000; 1096 } 1097 1098 if (trap) { 1099 nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap); 1100 nvkm_wr32(device, 0x400108, trap); 1101 } 1102 } 1103 1104 static void 1105 gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base) 1106 { 1107 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1108 struct nvkm_device *device = subdev->device; 1109 nvkm_error(subdev, "%06x - done %08x\n", base, 1110 nvkm_rd32(device, base + 0x400)); 1111 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base, 1112 nvkm_rd32(device, base + 0x800), 1113 nvkm_rd32(device, base + 0x804), 1114 nvkm_rd32(device, base + 0x808), 1115 nvkm_rd32(device, base + 0x80c)); 1116 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base, 1117 nvkm_rd32(device, base + 0x810), 1118 nvkm_rd32(device, base + 0x814), 1119 nvkm_rd32(device, base + 0x818), 1120 nvkm_rd32(device, base + 0x81c)); 1121 } 1122 1123 void 1124 gf100_gr_ctxctl_debug(struct gf100_gr *gr) 1125 { 1126 struct nvkm_device *device = gr->base.engine.subdev.device; 1127 u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff; 1128 u32 gpc; 1129 1130 gf100_gr_ctxctl_debug_unit(gr, 0x409000); 1131 for (gpc = 0; gpc < gpcnr; gpc++) 1132 gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000)); 1133 } 1134 1135 static void 1136 gf100_gr_ctxctl_isr(struct gf100_gr *gr) 1137 { 1138 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1139 struct nvkm_device *device = subdev->device; 1140 u32 stat = nvkm_rd32(device, 0x409c18); 1141 1142 if (stat & 0x00000001) { 1143 u32 code = nvkm_rd32(device, 0x409814); 1144 if (code == E_BAD_FWMTHD) { 1145 u32 class = nvkm_rd32(device, 0x409808); 1146 u32 addr = nvkm_rd32(device, 0x40980c); 1147 u32 subc = (addr & 0x00070000) >> 16; 1148 u32 mthd = (addr & 0x00003ffc); 1149 u32 data = nvkm_rd32(device, 0x409810); 1150 1151 nvkm_error(subdev, "FECS MTHD subc %d class %04x " 1152 "mthd %04x data %08x\n", 1153 subc, class, mthd, data); 1154 1155 nvkm_wr32(device, 0x409c20, 0x00000001); 1156 stat &= ~0x00000001; 1157 } else { 1158 nvkm_error(subdev, "FECS ucode error %d\n", code); 1159 } 1160 } 1161 1162 if (stat & 0x00080000) { 1163 nvkm_error(subdev, "FECS watchdog timeout\n"); 1164 gf100_gr_ctxctl_debug(gr); 1165 nvkm_wr32(device, 0x409c20, 0x00080000); 1166 stat &= ~0x00080000; 1167 } 1168 1169 if (stat) { 1170 nvkm_error(subdev, "FECS %08x\n", stat); 1171 gf100_gr_ctxctl_debug(gr); 1172 nvkm_wr32(device, 0x409c20, stat); 1173 } 1174 } 1175 1176 static void 1177 gf100_gr_intr(struct nvkm_gr *base) 1178 { 1179 struct gf100_gr *gr = gf100_gr(base); 1180 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1181 struct nvkm_device *device = subdev->device; 1182 struct nvkm_fifo_chan *chan; 1183 unsigned long flags; 1184 u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff; 1185 u32 stat = nvkm_rd32(device, 0x400100); 1186 u32 addr = nvkm_rd32(device, 0x400704); 1187 u32 mthd = (addr & 0x00003ffc); 1188 u32 subc = (addr & 0x00070000) >> 16; 1189 u32 data = nvkm_rd32(device, 0x400708); 1190 u32 code = nvkm_rd32(device, 0x400110); 1191 u32 class; 1192 const char *name = "unknown"; 1193 int chid = -1; 1194 1195 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); 1196 if (chan) { 1197 name = chan->object.client->name; 1198 chid = chan->chid; 1199 } 1200 1201 if (device->card_type < NV_E0 || subc < 4) 1202 class = nvkm_rd32(device, 0x404200 + (subc * 4)); 1203 else 1204 class = 0x0000; 1205 1206 if (stat & 0x00000001) { 1207 /* 1208 * notifier interrupt, only needed for cyclestats 1209 * can be safely ignored 1210 */ 1211 nvkm_wr32(device, 0x400100, 0x00000001); 1212 stat &= ~0x00000001; 1213 } 1214 1215 if (stat & 0x00000010) { 1216 if (!gf100_gr_mthd_sw(device, class, mthd, data)) { 1217 nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] " 1218 "subc %d class %04x mthd %04x data %08x\n", 1219 chid, inst << 12, name, subc, 1220 class, mthd, data); 1221 } 1222 nvkm_wr32(device, 0x400100, 0x00000010); 1223 stat &= ~0x00000010; 1224 } 1225 1226 if (stat & 0x00000020) { 1227 nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] " 1228 "subc %d class %04x mthd %04x data %08x\n", 1229 chid, inst << 12, name, subc, class, mthd, data); 1230 nvkm_wr32(device, 0x400100, 0x00000020); 1231 stat &= ~0x00000020; 1232 } 1233 1234 if (stat & 0x00100000) { 1235 const struct nvkm_enum *en = 1236 nvkm_enum_find(nv50_data_error_names, code); 1237 nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] " 1238 "subc %d class %04x mthd %04x data %08x\n", 1239 code, en ? en->name : "", chid, inst << 12, 1240 name, subc, class, mthd, data); 1241 nvkm_wr32(device, 0x400100, 0x00100000); 1242 stat &= ~0x00100000; 1243 } 1244 1245 if (stat & 0x00200000) { 1246 nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n", 1247 chid, inst << 12, name); 1248 gf100_gr_trap_intr(gr); 1249 nvkm_wr32(device, 0x400100, 0x00200000); 1250 stat &= ~0x00200000; 1251 } 1252 1253 if (stat & 0x00080000) { 1254 gf100_gr_ctxctl_isr(gr); 1255 nvkm_wr32(device, 0x400100, 0x00080000); 1256 stat &= ~0x00080000; 1257 } 1258 1259 if (stat) { 1260 nvkm_error(subdev, "intr %08x\n", stat); 1261 nvkm_wr32(device, 0x400100, stat); 1262 } 1263 1264 nvkm_wr32(device, 0x400500, 0x00010001); 1265 nvkm_fifo_chan_put(device->fifo, flags, &chan); 1266 } 1267 1268 void 1269 gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base, 1270 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) 1271 { 1272 struct nvkm_device *device = gr->base.engine.subdev.device; 1273 int i; 1274 1275 nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000); 1276 for (i = 0; i < data->size / 4; i++) 1277 nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]); 1278 1279 nvkm_wr32(device, fuc_base + 0x0180, 0x01000000); 1280 for (i = 0; i < code->size / 4; i++) { 1281 if ((i & 0x3f) == 0) 1282 nvkm_wr32(device, fuc_base + 0x0188, i >> 6); 1283 nvkm_wr32(device, fuc_base + 0x0184, code->data[i]); 1284 } 1285 1286 /* code must be padded to 0x40 words */ 1287 for (; i & 0x3f; i++) 1288 nvkm_wr32(device, fuc_base + 0x0184, 0); 1289 } 1290 1291 static void 1292 gf100_gr_init_csdata(struct gf100_gr *gr, 1293 const struct gf100_gr_pack *pack, 1294 u32 falcon, u32 starstar, u32 base) 1295 { 1296 struct nvkm_device *device = gr->base.engine.subdev.device; 1297 const struct gf100_gr_pack *iter; 1298 const struct gf100_gr_init *init; 1299 u32 addr = ~0, prev = ~0, xfer = 0; 1300 u32 star, temp; 1301 1302 nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar); 1303 star = nvkm_rd32(device, falcon + 0x01c4); 1304 temp = nvkm_rd32(device, falcon + 0x01c4); 1305 if (temp > star) 1306 star = temp; 1307 nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star); 1308 1309 pack_for_each_init(init, iter, pack) { 1310 u32 head = init->addr - base; 1311 u32 tail = head + init->count * init->pitch; 1312 while (head < tail) { 1313 if (head != prev + 4 || xfer >= 32) { 1314 if (xfer) { 1315 u32 data = ((--xfer << 26) | addr); 1316 nvkm_wr32(device, falcon + 0x01c4, data); 1317 star += 4; 1318 } 1319 addr = head; 1320 xfer = 0; 1321 } 1322 prev = head; 1323 xfer = xfer + 1; 1324 head = head + init->pitch; 1325 } 1326 } 1327 1328 nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr); 1329 nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar); 1330 nvkm_wr32(device, falcon + 0x01c4, star + 4); 1331 } 1332 1333 int 1334 gf100_gr_init_ctxctl(struct gf100_gr *gr) 1335 { 1336 const struct gf100_grctx_func *grctx = gr->func->grctx; 1337 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1338 struct nvkm_device *device = subdev->device; 1339 int i; 1340 1341 if (gr->firmware) { 1342 /* load fuc microcode */ 1343 nvkm_mc_unk260(device->mc, 0); 1344 gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d); 1345 gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad); 1346 nvkm_mc_unk260(device->mc, 1); 1347 1348 /* start both of them running */ 1349 nvkm_wr32(device, 0x409840, 0xffffffff); 1350 nvkm_wr32(device, 0x41a10c, 0x00000000); 1351 nvkm_wr32(device, 0x40910c, 0x00000000); 1352 nvkm_wr32(device, 0x41a100, 0x00000002); 1353 nvkm_wr32(device, 0x409100, 0x00000002); 1354 if (nvkm_msec(device, 2000, 1355 if (nvkm_rd32(device, 0x409800) & 0x00000001) 1356 break; 1357 ) < 0) 1358 return -EBUSY; 1359 1360 nvkm_wr32(device, 0x409840, 0xffffffff); 1361 nvkm_wr32(device, 0x409500, 0x7fffffff); 1362 nvkm_wr32(device, 0x409504, 0x00000021); 1363 1364 nvkm_wr32(device, 0x409840, 0xffffffff); 1365 nvkm_wr32(device, 0x409500, 0x00000000); 1366 nvkm_wr32(device, 0x409504, 0x00000010); 1367 if (nvkm_msec(device, 2000, 1368 if ((gr->size = nvkm_rd32(device, 0x409800))) 1369 break; 1370 ) < 0) 1371 return -EBUSY; 1372 1373 nvkm_wr32(device, 0x409840, 0xffffffff); 1374 nvkm_wr32(device, 0x409500, 0x00000000); 1375 nvkm_wr32(device, 0x409504, 0x00000016); 1376 if (nvkm_msec(device, 2000, 1377 if (nvkm_rd32(device, 0x409800)) 1378 break; 1379 ) < 0) 1380 return -EBUSY; 1381 1382 nvkm_wr32(device, 0x409840, 0xffffffff); 1383 nvkm_wr32(device, 0x409500, 0x00000000); 1384 nvkm_wr32(device, 0x409504, 0x00000025); 1385 if (nvkm_msec(device, 2000, 1386 if (nvkm_rd32(device, 0x409800)) 1387 break; 1388 ) < 0) 1389 return -EBUSY; 1390 1391 if (device->chipset >= 0xe0) { 1392 nvkm_wr32(device, 0x409800, 0x00000000); 1393 nvkm_wr32(device, 0x409500, 0x00000001); 1394 nvkm_wr32(device, 0x409504, 0x00000030); 1395 if (nvkm_msec(device, 2000, 1396 if (nvkm_rd32(device, 0x409800)) 1397 break; 1398 ) < 0) 1399 return -EBUSY; 1400 1401 nvkm_wr32(device, 0x409810, 0xb00095c8); 1402 nvkm_wr32(device, 0x409800, 0x00000000); 1403 nvkm_wr32(device, 0x409500, 0x00000001); 1404 nvkm_wr32(device, 0x409504, 0x00000031); 1405 if (nvkm_msec(device, 2000, 1406 if (nvkm_rd32(device, 0x409800)) 1407 break; 1408 ) < 0) 1409 return -EBUSY; 1410 1411 nvkm_wr32(device, 0x409810, 0x00080420); 1412 nvkm_wr32(device, 0x409800, 0x00000000); 1413 nvkm_wr32(device, 0x409500, 0x00000001); 1414 nvkm_wr32(device, 0x409504, 0x00000032); 1415 if (nvkm_msec(device, 2000, 1416 if (nvkm_rd32(device, 0x409800)) 1417 break; 1418 ) < 0) 1419 return -EBUSY; 1420 1421 nvkm_wr32(device, 0x409614, 0x00000070); 1422 nvkm_wr32(device, 0x409614, 0x00000770); 1423 nvkm_wr32(device, 0x40802c, 0x00000001); 1424 } 1425 1426 if (gr->data == NULL) { 1427 int ret = gf100_grctx_generate(gr); 1428 if (ret) { 1429 nvkm_error(subdev, "failed to construct context\n"); 1430 return ret; 1431 } 1432 } 1433 1434 return 0; 1435 } else 1436 if (!gr->func->fecs.ucode) { 1437 return -ENOSYS; 1438 } 1439 1440 /* load HUB microcode */ 1441 nvkm_mc_unk260(device->mc, 0); 1442 nvkm_wr32(device, 0x4091c0, 0x01000000); 1443 for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++) 1444 nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]); 1445 1446 nvkm_wr32(device, 0x409180, 0x01000000); 1447 for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) { 1448 if ((i & 0x3f) == 0) 1449 nvkm_wr32(device, 0x409188, i >> 6); 1450 nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]); 1451 } 1452 1453 /* load GPC microcode */ 1454 nvkm_wr32(device, 0x41a1c0, 0x01000000); 1455 for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++) 1456 nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]); 1457 1458 nvkm_wr32(device, 0x41a180, 0x01000000); 1459 for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) { 1460 if ((i & 0x3f) == 0) 1461 nvkm_wr32(device, 0x41a188, i >> 6); 1462 nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]); 1463 } 1464 nvkm_mc_unk260(device->mc, 1); 1465 1466 /* load register lists */ 1467 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000); 1468 gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000); 1469 gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800); 1470 gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00); 1471 1472 /* start HUB ucode running, it'll init the GPCs */ 1473 nvkm_wr32(device, 0x40910c, 0x00000000); 1474 nvkm_wr32(device, 0x409100, 0x00000002); 1475 if (nvkm_msec(device, 2000, 1476 if (nvkm_rd32(device, 0x409800) & 0x80000000) 1477 break; 1478 ) < 0) { 1479 gf100_gr_ctxctl_debug(gr); 1480 return -EBUSY; 1481 } 1482 1483 gr->size = nvkm_rd32(device, 0x409804); 1484 if (gr->data == NULL) { 1485 int ret = gf100_grctx_generate(gr); 1486 if (ret) { 1487 nvkm_error(subdev, "failed to construct context\n"); 1488 return ret; 1489 } 1490 } 1491 1492 return 0; 1493 } 1494 1495 static int 1496 gf100_gr_oneinit(struct nvkm_gr *base) 1497 { 1498 struct gf100_gr *gr = gf100_gr(base); 1499 struct nvkm_device *device = gr->base.engine.subdev.device; 1500 int ret, i, j; 1501 1502 nvkm_pmu_pgob(device->pmu, false); 1503 1504 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false, 1505 &gr->unk4188b4); 1506 if (ret) 1507 return ret; 1508 1509 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false, 1510 &gr->unk4188b8); 1511 if (ret) 1512 return ret; 1513 1514 nvkm_kmap(gr->unk4188b4); 1515 for (i = 0; i < 0x1000; i += 4) 1516 nvkm_wo32(gr->unk4188b4, i, 0x00000010); 1517 nvkm_done(gr->unk4188b4); 1518 1519 nvkm_kmap(gr->unk4188b8); 1520 for (i = 0; i < 0x1000; i += 4) 1521 nvkm_wo32(gr->unk4188b8, i, 0x00000010); 1522 nvkm_done(gr->unk4188b8); 1523 1524 gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16; 1525 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f; 1526 for (i = 0; i < gr->gpc_nr; i++) { 1527 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608)); 1528 gr->tpc_total += gr->tpc_nr[i]; 1529 gr->ppc_nr[i] = gr->func->ppc_nr; 1530 for (j = 0; j < gr->ppc_nr[i]; j++) { 1531 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); 1532 gr->ppc_tpc_nr[i][j] = hweight8(mask); 1533 } 1534 } 1535 1536 /*XXX: these need figuring out... though it might not even matter */ 1537 switch (device->chipset) { 1538 case 0xc0: 1539 if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */ 1540 gr->magic_not_rop_nr = 0x07; 1541 } else 1542 if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */ 1543 gr->magic_not_rop_nr = 0x05; 1544 } else 1545 if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */ 1546 gr->magic_not_rop_nr = 0x06; 1547 } 1548 break; 1549 case 0xc3: /* 450, 4/0/0/0, 2 */ 1550 gr->magic_not_rop_nr = 0x03; 1551 break; 1552 case 0xc4: /* 460, 3/4/0/0, 4 */ 1553 gr->magic_not_rop_nr = 0x01; 1554 break; 1555 case 0xc1: /* 2/0/0/0, 1 */ 1556 gr->magic_not_rop_nr = 0x01; 1557 break; 1558 case 0xc8: /* 4/4/3/4, 5 */ 1559 gr->magic_not_rop_nr = 0x06; 1560 break; 1561 case 0xce: /* 4/4/0/0, 4 */ 1562 gr->magic_not_rop_nr = 0x03; 1563 break; 1564 case 0xcf: /* 4/0/0/0, 3 */ 1565 gr->magic_not_rop_nr = 0x03; 1566 break; 1567 case 0xd7: 1568 case 0xd9: /* 1/0/0/0, 1 */ 1569 case 0xea: /* gk20a */ 1570 case 0x12b: /* gm20b */ 1571 gr->magic_not_rop_nr = 0x01; 1572 break; 1573 } 1574 1575 return 0; 1576 } 1577 1578 int 1579 gf100_gr_init_(struct nvkm_gr *base) 1580 { 1581 struct gf100_gr *gr = gf100_gr(base); 1582 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); 1583 return gr->func->init(gr); 1584 } 1585 1586 void 1587 gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) 1588 { 1589 kfree(fuc->data); 1590 fuc->data = NULL; 1591 } 1592 1593 void * 1594 gf100_gr_dtor(struct nvkm_gr *base) 1595 { 1596 struct gf100_gr *gr = gf100_gr(base); 1597 1598 if (gr->func->dtor) 1599 gr->func->dtor(gr); 1600 kfree(gr->data); 1601 1602 gf100_gr_dtor_fw(&gr->fuc409c); 1603 gf100_gr_dtor_fw(&gr->fuc409d); 1604 gf100_gr_dtor_fw(&gr->fuc41ac); 1605 gf100_gr_dtor_fw(&gr->fuc41ad); 1606 1607 nvkm_memory_del(&gr->unk4188b8); 1608 nvkm_memory_del(&gr->unk4188b4); 1609 return gr; 1610 } 1611 1612 static const struct nvkm_gr_func 1613 gf100_gr_ = { 1614 .dtor = gf100_gr_dtor, 1615 .oneinit = gf100_gr_oneinit, 1616 .init = gf100_gr_init_, 1617 .intr = gf100_gr_intr, 1618 .units = gf100_gr_units, 1619 .chan_new = gf100_gr_chan_new, 1620 .object_get = gf100_gr_object_get, 1621 }; 1622 1623 int 1624 gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname, 1625 struct gf100_gr_fuc *fuc) 1626 { 1627 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1628 struct nvkm_device *device = subdev->device; 1629 const struct firmware *fw; 1630 char f[64]; 1631 char cname[16]; 1632 int ret; 1633 int i; 1634 1635 /* Convert device name to lowercase */ 1636 strncpy(cname, device->chip->name, sizeof(cname)); 1637 cname[sizeof(cname) - 1] = '\0'; 1638 i = strlen(cname); 1639 while (i) { 1640 --i; 1641 cname[i] = tolower(cname[i]); 1642 } 1643 1644 snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); 1645 ret = request_firmware(&fw, f, device->dev); 1646 if (ret) { 1647 nvkm_error(subdev, "failed to load %s\n", fwname); 1648 return ret; 1649 } 1650 1651 fuc->size = fw->size; 1652 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); 1653 release_firmware(fw); 1654 return (fuc->data != NULL) ? 0 : -ENOMEM; 1655 } 1656 1657 int 1658 gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, 1659 int index, struct gf100_gr *gr) 1660 { 1661 int ret; 1662 1663 gr->func = func; 1664 gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW", 1665 func->fecs.ucode == NULL); 1666 1667 ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000, 1668 gr->firmware || func->fecs.ucode != NULL, 1669 &gr->base); 1670 if (ret) 1671 return ret; 1672 1673 if (gr->firmware) { 1674 nvkm_info(&gr->base.engine.subdev, "using external firmware\n"); 1675 if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || 1676 gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || 1677 gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || 1678 gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) 1679 return -ENODEV; 1680 } 1681 1682 return 0; 1683 } 1684 1685 int 1686 gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, 1687 int index, struct nvkm_gr **pgr) 1688 { 1689 struct gf100_gr *gr; 1690 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) 1691 return -ENOMEM; 1692 *pgr = &gr->base; 1693 return gf100_gr_ctor(func, device, index, gr); 1694 } 1695 1696 int 1697 gf100_gr_init(struct gf100_gr *gr) 1698 { 1699 struct nvkm_device *device = gr->base.engine.subdev.device; 1700 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 1701 u32 data[TPC_MAX / 8] = {}; 1702 u8 tpcnr[GPC_MAX]; 1703 int gpc, tpc, rop; 1704 int i; 1705 1706 nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000); 1707 nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000); 1708 nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000); 1709 nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000); 1710 nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000); 1711 nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000); 1712 nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8); 1713 nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8); 1714 1715 gf100_gr_mmio(gr, gr->func->mmio); 1716 1717 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1718 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1719 do { 1720 gpc = (gpc + 1) % gr->gpc_nr; 1721 } while (!tpcnr[gpc]); 1722 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; 1723 1724 data[i / 8] |= tpc << ((i % 8) * 4); 1725 } 1726 1727 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); 1728 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); 1729 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); 1730 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); 1731 1732 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 1733 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), 1734 gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]); 1735 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | 1736 gr->tpc_total); 1737 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); 1738 } 1739 1740 if (device->chipset != 0xd7) 1741 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918); 1742 else 1743 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); 1744 1745 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); 1746 1747 nvkm_wr32(device, 0x400500, 0x00010001); 1748 1749 nvkm_wr32(device, 0x400100, 0xffffffff); 1750 nvkm_wr32(device, 0x40013c, 0xffffffff); 1751 1752 nvkm_wr32(device, 0x409c24, 0x000f0000); 1753 nvkm_wr32(device, 0x404000, 0xc0000000); 1754 nvkm_wr32(device, 0x404600, 0xc0000000); 1755 nvkm_wr32(device, 0x408030, 0xc0000000); 1756 nvkm_wr32(device, 0x40601c, 0xc0000000); 1757 nvkm_wr32(device, 0x404490, 0xc0000000); 1758 nvkm_wr32(device, 0x406018, 0xc0000000); 1759 nvkm_wr32(device, 0x405840, 0xc0000000); 1760 nvkm_wr32(device, 0x405844, 0x00ffffff); 1761 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); 1762 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000); 1763 1764 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 1765 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 1766 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 1767 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); 1768 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); 1769 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 1770 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 1771 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 1772 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 1773 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 1774 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 1775 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); 1776 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); 1777 } 1778 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 1779 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 1780 } 1781 1782 for (rop = 0; rop < gr->rop_nr; rop++) { 1783 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); 1784 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); 1785 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); 1786 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); 1787 } 1788 1789 nvkm_wr32(device, 0x400108, 0xffffffff); 1790 nvkm_wr32(device, 0x400138, 0xffffffff); 1791 nvkm_wr32(device, 0x400118, 0xffffffff); 1792 nvkm_wr32(device, 0x400130, 0xffffffff); 1793 nvkm_wr32(device, 0x40011c, 0xffffffff); 1794 nvkm_wr32(device, 0x400134, 0xffffffff); 1795 1796 nvkm_wr32(device, 0x400054, 0x34ce3464); 1797 1798 gf100_gr_zbc_init(gr); 1799 1800 return gf100_gr_init_ctxctl(gr); 1801 } 1802 1803 #include "fuc/hubgf100.fuc3.h" 1804 1805 struct gf100_gr_ucode 1806 gf100_gr_fecs_ucode = { 1807 .code.data = gf100_grhub_code, 1808 .code.size = sizeof(gf100_grhub_code), 1809 .data.data = gf100_grhub_data, 1810 .data.size = sizeof(gf100_grhub_data), 1811 }; 1812 1813 #include "fuc/gpcgf100.fuc3.h" 1814 1815 struct gf100_gr_ucode 1816 gf100_gr_gpccs_ucode = { 1817 .code.data = gf100_grgpc_code, 1818 .code.size = sizeof(gf100_grgpc_code), 1819 .data.data = gf100_grgpc_data, 1820 .data.size = sizeof(gf100_grgpc_data), 1821 }; 1822 1823 static const struct gf100_gr_func 1824 gf100_gr = { 1825 .init = gf100_gr_init, 1826 .mmio = gf100_gr_pack_mmio, 1827 .fecs.ucode = &gf100_gr_fecs_ucode, 1828 .gpccs.ucode = &gf100_gr_gpccs_ucode, 1829 .grctx = &gf100_grctx, 1830 .sclass = { 1831 { -1, -1, FERMI_TWOD_A }, 1832 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 1833 { -1, -1, FERMI_A, &gf100_fermi }, 1834 { -1, -1, FERMI_COMPUTE_A }, 1835 {} 1836 } 1837 }; 1838 1839 int 1840 gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 1841 { 1842 return gf100_gr_new_(&gf100_gr, device, index, pgr); 1843 } 1844