/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "ctxgf100.h"
#include "fuc/os.h"

#include <core/client.h>
#include <core/option.h>
#include <core/firmware.h>
#include <subdev/secboot.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/pmu.h>
#include <subdev/therm.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

#include <nvif/class.h>
#include <nvif/cl9097.h>
#include <nvif/if900d.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * Zero Bandwidth Clear
 ******************************************************************************/

static void
gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	if (gr->zbc_color[zbc].format) {
		nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
		nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
		nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
		nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
	}
	nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
	nvkm_wr32(device, 0x405820, zbc);
	nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
}

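/* Find an existing ZBC color table entry matching (format, ds, l2), or claim
 * the first free slot, program it through LTC and the clear_color hook, and
 * return its index (-ENOSPC if the table is full).
 */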
static int
gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
		       const u32 ds[4], const u32 l2[4])
{
	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
	int zbc = -ENOSPC, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (gr->zbc_color[i].format) {
			if (gr->zbc_color[i].format != format)
				continue;
			if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
				   gr->zbc_color[i].ds)))
				continue;
			if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
				   gr->zbc_color[i].l2))) {
				WARN_ON(1);
				return -EINVAL;
			}
			return i;
		} else {
			zbc = (zbc < 0) ? i : zbc;
		}
	}

	if (zbc < 0)
		return zbc;

	memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
	gr->zbc_color[zbc].format = format;
	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
	gr->func->zbc->clear_color(gr, zbc);
	return zbc;
}

static void
gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	if (gr->zbc_depth[zbc].format)
		nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
	nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
	nvkm_wr32(device, 0x405820, zbc);
	nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
}

static int
gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
		       const u32 ds, const u32 l2)
{
	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
	int zbc = -ENOSPC, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (gr->zbc_depth[i].format) {
			if (gr->zbc_depth[i].format != format)
				continue;
			if (gr->zbc_depth[i].ds != ds)
				continue;
			if (gr->zbc_depth[i].l2 != l2) {
				WARN_ON(1);
				return -EINVAL;
			}
			return i;
		} else {
			zbc = (zbc < 0) ? i : zbc;
		}
	}

	if (zbc < 0)
		return zbc;

	gr->zbc_depth[zbc].format = format;
	gr->zbc_depth[zbc].ds = ds;
	gr->zbc_depth[zbc].l2 = l2;
	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
	gr->func->zbc->clear_depth(gr, zbc);
	return zbc;
}

const struct gf100_gr_func_zbc
gf100_gr_zbc = {
	.clear_color = gf100_gr_zbc_clear_color,
	.clear_depth = gf100_gr_zbc_clear_depth,
};

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/
#define gf100_gr_object(p) container_of((p), struct gf100_gr_object, object)

struct gf100_gr_object {
	struct nvkm_object object;
	struct gf100_gr_chan *chan;
};

static int
gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
{
	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
	union {
		struct fermi_a_zbc_color_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		switch (args->v0.format) {
		case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
		case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
		case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32:
		case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16:
		case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10:
		case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8:
		case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
		case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
		case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
			ret = gf100_gr_zbc_color_get(gr, args->v0.format,
						     args->v0.ds,
						     args->v0.l2);
			if (ret >= 0) {
				args->v0.index = ret;
				return 0;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int
gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
{
	struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
	union {
		struct fermi_a_zbc_depth_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		switch (args->v0.format) {
		case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
			ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
						     args->v0.ds,
						     args->v0.l2);
			return (ret >= 0) ? 0 : -ENOSPC;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int
gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	nvif_ioctl(object, "fermi mthd %08x\n", mthd);
	switch (mthd) {
	case FERMI_A_ZBC_COLOR:
		return gf100_fermi_mthd_zbc_color(object, data, size);
	case FERMI_A_ZBC_DEPTH:
		return gf100_fermi_mthd_zbc_depth(object, data, size);
	default:
		break;
	}
	return -EINVAL;
}

const struct nvkm_object_func
gf100_fermi = {
	.mthd = gf100_fermi_mthd,
};

static void
gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
{
	nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
	nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
}

static bool
gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
{
	switch (class & 0x00ff) {
	case 0x97:
	case 0xc0:
		switch (mthd) {
		case 0x1528:
			gf100_gr_mthd_set_shader_exceptions(device, data);
			return true;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return false;
}

static const struct nvkm_object_func
gf100_gr_object_func = {
};

static int
gf100_gr_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct gf100_gr_chan *chan = gf100_gr_chan(oclass->parent);
	struct gf100_gr_object *object;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &object->object;

	nvkm_object_ctor(oclass->base.func ? oclass->base.func :
			 &gf100_gr_object_func, oclass, &object->object);
	object->chan = chan;
	return 0;
}

static int
gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
{
	struct gf100_gr *gr = gf100_gr(base);
	int c = 0;

	while (gr->func->sclass[c].oclass) {
		if (c++ == index) {
			*sclass = gr->func->sclass[index];
			sclass->ctor = gf100_gr_object_new;
			return index;
		}
	}

	return c;
}

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

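/* Instantiate the channel's context image: copy the pre-generated context
 * data into the new gpuobj and point it at the per-channel mmio list.
 */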
static int
gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		   int align, struct nvkm_gpuobj **pgpuobj)
{
	struct gf100_gr_chan *chan = gf100_gr_chan(object);
	struct gf100_gr *gr = chan->gr;
	int ret, i;

	ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
			      align, false, parent, pgpuobj);
	if (ret)
		return ret;

	nvkm_kmap(*pgpuobj);
	for (i = 0; i < gr->size; i += 4)
		nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);

	if (!gr->firmware) {
		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8);
	} else {
		nvkm_wo32(*pgpuobj, 0xf4, 0);
		nvkm_wo32(*pgpuobj, 0xf8, 0);
		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr));
		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr));
		nvkm_wo32(*pgpuobj, 0x1c, 1);
		nvkm_wo32(*pgpuobj, 0x20, 0);
		nvkm_wo32(*pgpuobj, 0x28, 0);
		nvkm_wo32(*pgpuobj, 0x2c, 0);
	}
	nvkm_done(*pgpuobj);
	return 0;
}

static void *
gf100_gr_chan_dtor(struct nvkm_object *object)
{
	struct gf100_gr_chan *chan = gf100_gr_chan(object);
	int i;

	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
		nvkm_vmm_put(chan->vmm, &chan->data[i].vma);
		nvkm_memory_unref(&chan->data[i].mem);
	}

	nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
	nvkm_memory_unref(&chan->mmio);
	nvkm_vmm_unref(&chan->vmm);
	return chan;
}

static const struct nvkm_object_func
gf100_gr_chan = {
	.dtor = gf100_gr_chan_dtor,
	.bind = gf100_gr_chan_bind,
};

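/* Allocate per-channel PGRAPH state: the mmio list buffer plus any data
 * buffers it references, all mapped into the channel's VMM.
 */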
static int
gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		  const struct nvkm_oclass *oclass,
		  struct nvkm_object **pobject)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct gf100_gr_data *data = gr->mmio_data;
	struct gf100_gr_mmio *mmio = gr->mmio_list;
	struct gf100_gr_chan *chan;
	struct gf100_vmm_map_v0 args = { .priv = 1 };
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->vmm = nvkm_vmm_ref(fifoch->vmm);
	*pobject = &chan->object;

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
			      false, &chan->mmio);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
	if (ret)
		return ret;

	ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
			      chan->mmio_vma, &args, sizeof(args));
	if (ret)
		return ret;

	/* allocate buffers referenced by mmio list */
	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      data->size, data->align, false,
				      &chan->data[i].mem);
		if (ret)
			return ret;

		ret = nvkm_vmm_get(fifoch->vmm, 12,
				   nvkm_memory_size(chan->data[i].mem),
				   &chan->data[i].vma);
		if (ret)
			return ret;

		args.priv = data->priv;

		ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm,
				      chan->data[i].vma, &args, sizeof(args));
		if (ret)
			return ret;

		data++;
	}

	/* finally, fill in the mmio list and point the context at it */
	nvkm_kmap(chan->mmio);
	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
		u32 addr = mmio->addr;
		u32 data = mmio->data;

		if (mmio->buffer >= 0) {
			u64 info = chan->data[mmio->buffer].vma->addr;
			data |= info >> mmio->shift;
		}

		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
		mmio++;
	}
	nvkm_done(chan->mmio);
	return 0;
}

/*******************************************************************************
 * PGRAPH register lists
 ******************************************************************************/

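/* Each entry is { addr, count, pitch, data }: gf100_gr_mmio() writes "data"
 * to "count" registers starting at "addr", stepping "pitch" bytes between
 * them.
 */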
const struct gf100_gr_init
gf100_gr_init_main_0[] = {
	{ 0x400080, 1, 0x04, 0x003083c2 },
	{ 0x400088, 1, 0x04, 0x00006fe7 },
	{ 0x40008c, 1, 0x04, 0x00000000 },
	{ 0x400090, 1, 0x04, 0x00000030 },
	{ 0x40013c, 1, 0x04, 0x013901f7 },
	{ 0x400140, 1, 0x04, 0x00000100 },
	{ 0x400144, 1, 0x04, 0x00000000 },
	{ 0x400148, 1, 0x04, 0x00000110 },
	{ 0x400138, 1, 0x04, 0x00000000 },
	{ 0x400130, 2, 0x04, 0x00000000 },
	{ 0x400124, 1, 0x04, 0x00000002 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_fe_0[] = {
	{ 0x40415c, 1, 0x04, 0x00000000 },
	{ 0x404170, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pri_0[] = {
	{ 0x404488, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_rstr2d_0[] = {
	{ 0x407808, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pd_0[] = {
	{ 0x406024, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_ds_0[] = {
	{ 0x405844, 1, 0x04, 0x00ffffff },
	{ 0x405850, 1, 0x04, 0x00000000 },
	{ 0x405908, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_scc_0[] = {
	{ 0x40803c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_prop_0[] = {
	{ 0x4184a0, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpc_unk_0[] = {
	{ 0x418604, 1, 0x04, 0x00000000 },
	{ 0x418680, 1, 0x04, 0x00000000 },
	{ 0x418714, 1, 0x04, 0x80000000 },
	{ 0x418384, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_setup_0[] = {
	{ 0x418814, 3, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_crstr_0[] = {
	{ 0x418b04, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_setup_1[] = {
	{ 0x4188c8, 1, 0x04, 0x80000000 },
	{ 0x4188cc, 1, 0x04, 0x00000000 },
	{ 0x4188d0, 1, 0x04, 0x00010000 },
	{ 0x4188d4, 1, 0x04, 0x00000001 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_zcull_0[] = {
	{ 0x418910, 1, 0x04, 0x00010001 },
	{ 0x418914, 1, 0x04, 0x00000301 },
	{ 0x418918, 1, 0x04, 0x00800000 },
	{ 0x418980, 1, 0x04, 0x77777770 },
	{ 0x418984, 3, 0x04, 0x77777777 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpm_0[] = {
	{ 0x418c04, 1, 0x04, 0x00000000 },
	{ 0x418c88, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gpc_unk_1[] = {
	{ 0x418d00, 1, 0x04, 0x00000000 },
	{ 0x418f08, 1, 0x04, 0x00000000 },
	{ 0x418e00, 1, 0x04, 0x00000050 },
	{ 0x418e08, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_gcc_0[] = {
	{ 0x41900c, 1, 0x04, 0x00000000 },
	{ 0x419018, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tpccs_0[] = {
	{ 0x419d08, 2, 0x04, 0x00000000 },
	{ 0x419d10, 1, 0x04, 0x00000014 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tex_0[] = {
	{ 0x419ab0, 1, 0x04, 0x00000000 },
	{ 0x419ab8, 1, 0x04, 0x000000e7 },
	{ 0x419abc, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pe_0[] = {
	{ 0x41980c, 3, 0x04, 0x00000000 },
	{ 0x419844, 1, 0x04, 0x00000000 },
	{ 0x41984c, 1, 0x04, 0x00005bc5 },
	{ 0x419850, 4, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_l1c_0[] = {
	{ 0x419c98, 1, 0x04, 0x00000000 },
	{ 0x419ca8, 1, 0x04, 0x80000000 },
	{ 0x419cb4, 1, 0x04, 0x00000000 },
	{ 0x419cb8, 1, 0x04, 0x00008bf4 },
	{ 0x419cbc, 1, 0x04, 0x28137606 },
	{ 0x419cc0, 2, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_wwdx_0[] = {
	{ 0x419bd4, 1, 0x04, 0x00800000 },
	{ 0x419bdc, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_tpccs_1[] = {
	{ 0x419d2c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_mpc_0[] = {
	{ 0x419c0c, 1, 0x04, 0x00000000 },
	{}
};

static const struct gf100_gr_init
gf100_gr_init_sm_0[] = {
	{ 0x419e00, 1, 0x04, 0x00000000 },
	{ 0x419ea0, 1, 0x04, 0x00000000 },
	{ 0x419ea4, 1, 0x04, 0x00000100 },
	{ 0x419ea8, 1, 0x04, 0x00001100 },
	{ 0x419eac, 1, 0x04, 0x11100702 },
	{ 0x419eb0, 1, 0x04, 0x00000003 },
	{ 0x419eb4, 4, 0x04, 0x00000000 },
	{ 0x419ec8, 1, 0x04, 0x06060618 },
	{ 0x419ed0, 1, 0x04, 0x0eff0e38 },
	{ 0x419ed4, 1, 0x04, 0x011104f1 },
	{ 0x419edc, 1, 0x04, 0x00000000 },
	{ 0x419f00, 1, 0x04, 0x00000000 },
	{ 0x419f2c, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_be_0[] = {
	{ 0x40880c, 1, 0x04, 0x00000000 },
	{ 0x408910, 9, 0x04, 0x00000000 },
	{ 0x408950, 1, 0x04, 0x00000000 },
	{ 0x408954, 1, 0x04, 0x0000ffff },
	{ 0x408984, 1, 0x04, 0x00000000 },
	{ 0x408988, 1, 0x04, 0x08040201 },
	{ 0x40898c, 1, 0x04, 0x80402010 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_fe_1[] = {
	{ 0x4040f0, 1, 0x04, 0x00000000 },
	{}
};

const struct gf100_gr_init
gf100_gr_init_pe_1[] = {
	{ 0x419880, 1, 0x04, 0x00000002 },
	{}
};

static const struct gf100_gr_pack
gf100_gr_pack_mmio[] = {
	{ gf100_gr_init_main_0 },
	{ gf100_gr_init_fe_0 },
	{ gf100_gr_init_pri_0 },
	{ gf100_gr_init_rstr2d_0 },
	{ gf100_gr_init_pd_0 },
	{ gf100_gr_init_ds_0 },
	{ gf100_gr_init_scc_0 },
	{ gf100_gr_init_prop_0 },
	{ gf100_gr_init_gpc_unk_0 },
	{ gf100_gr_init_setup_0 },
	{ gf100_gr_init_crstr_0 },
	{ gf100_gr_init_setup_1 },
	{ gf100_gr_init_zcull_0 },
	{ gf100_gr_init_gpm_0 },
	{ gf100_gr_init_gpc_unk_1 },
	{ gf100_gr_init_gcc_0 },
	{ gf100_gr_init_tpccs_0 },
	{ gf100_gr_init_tex_0 },
	{ gf100_gr_init_pe_0 },
	{ gf100_gr_init_l1c_0 },
	{ gf100_gr_init_wwdx_0 },
	{ gf100_gr_init_tpccs_1 },
	{ gf100_gr_init_mpc_0 },
	{ gf100_gr_init_sm_0 },
	{ gf100_gr_init_be_0 },
	{ gf100_gr_init_fe_1 },
	{ gf100_gr_init_pe_1 },
	{}
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

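/* Simple FECS method submission: argument in 0x409500, method number in
 * 0x409504, result polled from mailbox 0x409800.
 */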
static int
gf100_gr_fecs_discover_zcull_image_size(struct gf100_gr *gr, u32 *psize)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x409840, 0xffffffff);
	nvkm_wr32(device, 0x409500, 0x00000000);
	nvkm_wr32(device, 0x409504, 0x00000016);
	nvkm_msec(device, 2000,
		if ((*psize = nvkm_rd32(device, 0x409800)))
			return 0;
	);

	return -ETIMEDOUT;
}

static int
gf100_gr_fecs_discover_image_size(struct gf100_gr *gr, u32 *psize)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x409840, 0xffffffff);
	nvkm_wr32(device, 0x409500, 0x00000000);
	nvkm_wr32(device, 0x409504, 0x00000010);
	nvkm_msec(device, 2000,
		if ((*psize = nvkm_rd32(device, 0x409800)))
			return 0;
	);

	return -ETIMEDOUT;
}

static void
gf100_gr_fecs_set_watchdog_timeout(struct gf100_gr *gr, u32 timeout)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x409840, 0xffffffff);
	nvkm_wr32(device, 0x409500, timeout);
	nvkm_wr32(device, 0x409504, 0x00000021);
}

static bool
gf100_gr_chsw_load(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	if (!gr->firmware) {
		u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c);
		if (trace & 0x00000040)
			return true;
	} else {
		u32 mthd = nvkm_rd32(gr->base.engine.subdev.device, 0x409808);
		if (mthd & 0x00080000)
			return true;
	}
	return false;
}

int
gf100_gr_rops(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	return (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
}

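/* Program the default ZBC table entries (zero/one colors, 0.0/1.0 depth,
 * plus stencil where supported), then reload the remaining slots into
 * hardware.
 */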
void
gf100_gr_zbc_init(struct gf100_gr *gr)
{
	const u32  zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const u32   one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
			      0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
	const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
	int index, c = ltc->zbc_min, d = ltc->zbc_min, s = ltc->zbc_min;

	if (!gr->zbc_color[0].format) {
		gf100_gr_zbc_color_get(gr, 1,  & zero[0],  &zero[4]); c++;
		gf100_gr_zbc_color_get(gr, 2,  &  one[0],   &one[4]); c++;
		gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); c++;
		gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); c++;
		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); d++;
		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); d++;
		if (gr->func->zbc->stencil_get) {
			gr->func->zbc->stencil_get(gr, 1, 0x00, 0x00); s++;
			gr->func->zbc->stencil_get(gr, 1, 0x01, 0x01); s++;
			gr->func->zbc->stencil_get(gr, 1, 0xff, 0xff); s++;
		}
	}

	for (index = c; index <= ltc->zbc_max; index++)
		gr->func->zbc->clear_color(gr, index);
	for (index = d; index <= ltc->zbc_max; index++)
		gr->func->zbc->clear_depth(gr, index);

	if (gr->func->zbc->clear_stencil) {
		for (index = s; index <= ltc->zbc_max; index++)
			gr->func->zbc->clear_stencil(gr, index);
	}
}

/**
 * Wait until GR goes idle. GR is considered idle if it is disabled by the
 * MC (0x200) register, or GR is not busy and a context switch is not in
 * progress.
 */
int
gf100_gr_wait_idle(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
	bool gr_enabled, ctxsw_active, gr_busy;

	do {
		/*
		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
		 * up-to-date
		 */
		nvkm_rd32(device, 0x400700);

		gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
		ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
		gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;

		if (!gr_enabled || (!gr_busy && !ctxsw_active))
			return 0;
	} while (time_before(jiffies, end_jiffies));

	nvkm_error(subdev,
		   "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
		   gr_enabled, ctxsw_active, gr_busy);
	return -EAGAIN;
}

void
gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;

	pack_for_each_init(init, pack, p) {
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;
		while (addr < next) {
			nvkm_wr32(device, addr, init->data);
			addr += init->pitch;
		}
	}
}

void
gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;
	u32 data = 0;

	nvkm_wr32(device, 0x400208, 0x80000000);

	pack_for_each_init(init, pack, p) {
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;

		if ((pack == p && init == p->init) || data != init->data) {
			nvkm_wr32(device, 0x400204, init->data);
			data = init->data;
		}

		while (addr < next) {
			nvkm_wr32(device, 0x400200, addr);
			/**
			 * Wait for GR to go idle after submitting a
			 * GO_IDLE bundle
			 */
			if ((addr & 0xffff) == 0xe100)
				gf100_gr_wait_idle(gr);
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
					break;
			);
			addr += init->pitch;
		}
	}

	nvkm_wr32(device, 0x400208, 0x00000000);
}

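/* Play back a list of method (address, data) pairs through the firmware
 * method registers at 0x404488/0x40448c.
 */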
void
gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *pack;
	const struct gf100_gr_init *init;
	u32 data = 0;

	pack_for_each_init(init, pack, p) {
		u32 ctrl = 0x80000000 | pack->type;
		u32 next = init->addr + init->count * init->pitch;
		u32 addr = init->addr;

		if ((pack == p && init == p->init) || data != init->data) {
			nvkm_wr32(device, 0x40448c, init->data);
			data = init->data;
		}

		while (addr < next) {
			nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
			addr += init->pitch;
		}
	}
}

u64
gf100_gr_units(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	u64 cfg;

	cfg  = (u32)gr->gpc_nr;
	cfg |= (u32)gr->tpc_total << 8;
	cfg |= (u64)gr->rop_nr << 32;

	return cfg;
}

static const struct nvkm_bitfield gf100_dispatch_error[] = {
	{ 0x00000001, "INJECTED_BUNDLE_ERROR" },
	{ 0x00000002, "CLASS_SUBCH_MISMATCH" },
	{ 0x00000004, "SUBCHSW_DURING_NOTIFY" },
	{}
};

static const struct nvkm_bitfield gf100_m2mf_error[] = {
	{ 0x00000001, "PUSH_TOO_MUCH_DATA" },
	{ 0x00000002, "PUSH_NOT_ENOUGH_DATA" },
	{}
};

static const struct nvkm_bitfield gf100_unk6_error[] = {
	{ 0x00000001, "TEMP_TOO_SMALL" },
	{}
};

static const struct nvkm_bitfield gf100_ccache_error[] = {
	{ 0x00000001, "INTR" },
	{ 0x00000002, "LDCONST_OOB" },
	{}
};

static const struct nvkm_bitfield gf100_macro_error[] = {
	{ 0x00000001, "TOO_FEW_PARAMS" },
	{ 0x00000002, "TOO_MANY_PARAMS" },
	{ 0x00000004, "ILLEGAL_OPCODE" },
	{ 0x00000008, "DOUBLE_BRANCH" },
	{ 0x00000010, "WATCHDOG" },
	{}
};

static const struct nvkm_bitfield gk104_sked_error[] = {
	{ 0x00000040, "CTA_RESUME" },
	{ 0x00000080, "CONSTANT_BUFFER_SIZE" },
	{ 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
	{ 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
	{ 0x00000800, "WARP_CSTACK_SIZE" },
	{ 0x00001000, "TOTAL_TEMP_SIZE" },
	{ 0x00002000, "REGISTER_COUNT" },
	{ 0x00040000, "TOTAL_THREADS" },
	{ 0x00100000, "PROGRAM_OFFSET" },
	{ 0x00200000, "SHARED_MEMORY_SIZE" },
	{ 0x00800000, "CTA_THREAD_DIMENSION_ZERO" },
	{ 0x01000000, "MEMORY_WINDOW_OVERLAP" },
	{ 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
	{ 0x04000000, "TOTAL_REGISTER_COUNT" },
	{}
};

static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
	{ 0x00000002, "RT_PITCH_OVERRUN" },
	{ 0x00000010, "RT_WIDTH_OVERRUN" },
	{ 0x00000020, "RT_HEIGHT_OVERRUN" },
	{ 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_LINEAR_MISMATCH" },
	{}
};

static void
gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	char error[128];
	u32 trap[4];

	trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
	trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
	trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
	trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));

	nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);

	nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
			   "format = %x, storage type = %x\n",
		   gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
		   (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
}

const struct nvkm_enum gf100_mp_warp_error[] = {
	{ 0x01, "STACK_ERROR" },
	{ 0x02, "API_STACK_ERROR" },
	{ 0x03, "RET_EMPTY_STACK_ERROR" },
	{ 0x04, "PC_WRAP" },
	{ 0x05, "MISALIGNED_PC" },
	{ 0x06, "PC_OVERFLOW" },
	{ 0x07, "MISALIGNED_IMMC_ADDR" },
	{ 0x08, "MISALIGNED_REG" },
	{ 0x09, "ILLEGAL_INSTR_ENCODING" },
	{ 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
	{ 0x0b, "ILLEGAL_INSTR_PARAM" },
	{ 0x0c, "INVALID_CONST_ADDR" },
	{ 0x0d, "OOR_REG" },
	{ 0x0e, "OOR_ADDR" },
	{ 0x0f, "MISALIGNED_ADDR" },
	{ 0x10, "INVALID_ADDR_SPACE" },
	{ 0x11, "ILLEGAL_INSTR_PARAM2" },
	{ 0x12, "INVALID_CONST_ADDR_LDC" },
	{ 0x13, "GEOMETRY_SM_ERROR" },
	{ 0x14, "DIVERGENT" },
	{ 0x15, "WARP_EXIT" },
	{}
};

const struct nvkm_bitfield gf100_mp_global_error[] = {
	{ 0x00000001, "SM_TO_SM_FAULT" },
	{ 0x00000002, "L1_ERROR" },
	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
	{ 0x00000010, "BPT_INT" },
	{ 0x00000020, "BPT_PAUSE" },
	{ 0x00000040, "SINGLE_STEP_COMPLETE" },
	{ 0x20000000, "ECC_SEC_ERROR" },
	{ 0x40000000, "ECC_DED_ERROR" },
	{ 0x80000000, "TIMEOUT" },
	{}
};

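/* Report and acknowledge MP/SM traps: warp error code in TPC register 0x648,
 * global error bitfield in 0x650.
 */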
void
gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
	const struct nvkm_enum *warp;
	char glob[128];

	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);

	nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
			   "global %08x [%s] warp %04x [%s]\n",
		   gpc, tpc, gerr, glob, werr, warp ? warp->name : "");

	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
}

static void
gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));

	if (stat & 0x00000001) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
		nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		gr->func->trap_mp(gr, gpc, tpc);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
		nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
		nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
		stat &= ~0x00000008;
	}

	if (stat & 0x00000010) {
		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0430));
		nvkm_error(subdev, "GPC%d/TPC%d/MPC: %08x\n", gpc, tpc, trap);
		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0430), 0xc0000000);
		stat &= ~0x00000010;
	}

	if (stat) {
		nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
	}
}

static void
gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
	int tpc;

	if (stat & 0x00000001) {
		gf100_gr_trap_gpc_rop(gr, gpc);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
		nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
		nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
		nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		stat &= ~0x00000009;
	}

	for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
		u32 mask = 0x00010000 << tpc;
		if (stat & mask) {
			gf100_gr_trap_tpc(gr, gpc, tpc);
			nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
			stat &= ~mask;
		}
	}

	if (stat) {
		nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
	}
}

static void
gf100_gr_trap_intr(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	char error[128];
	u32 trap = nvkm_rd32(device, 0x400108);
	int rop, gpc;

	if (trap & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x404000);

		nvkm_snprintbf(error, sizeof(error), gf100_dispatch_error,
			       stat & 0x3fffffff);
		nvkm_error(subdev, "DISPATCH %08x [%s]\n", stat, error);
		nvkm_wr32(device, 0x404000, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000001);
		trap &= ~0x00000001;
	}

	if (trap & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x404600);

		nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
			       stat & 0x3fffffff);
		nvkm_error(subdev, "M2MF %08x [%s]\n", stat, error);

		nvkm_wr32(device, 0x404600, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000002);
		trap &= ~0x00000002;
	}

	if (trap & 0x00000008) {
		u32 stat = nvkm_rd32(device, 0x408030);

		nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
			       stat & 0x3fffffff);
		nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
		nvkm_wr32(device, 0x408030, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000008);
		trap &= ~0x00000008;
	}

	if (trap & 0x00000010) {
		u32 stat = nvkm_rd32(device, 0x405840);
		nvkm_error(subdev, "SHADER %08x, sph: 0x%06x, stage: 0x%02x\n",
			   stat, stat & 0xffffff, (stat >> 24) & 0x3f);
		nvkm_wr32(device, 0x405840, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000010);
		trap &= ~0x00000010;
	}

	if (trap & 0x00000040) {
		u32 stat = nvkm_rd32(device, 0x40601c);

		nvkm_snprintbf(error, sizeof(error), gf100_unk6_error,
			       stat & 0x3fffffff);
		nvkm_error(subdev, "UNK6 %08x [%s]\n", stat, error);

		nvkm_wr32(device, 0x40601c, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000040);
		trap &= ~0x00000040;
	}

	if (trap & 0x00000080) {
		u32 stat = nvkm_rd32(device, 0x404490);
		u32 pc = nvkm_rd32(device, 0x404494);
		u32 op = nvkm_rd32(device, 0x40449c);

		nvkm_snprintbf(error, sizeof(error), gf100_macro_error,
			       stat & 0x1fffffff);
		nvkm_error(subdev, "MACRO %08x [%s], pc: 0x%03x%s, op: 0x%08x\n",
			   stat, error, pc & 0x7ff,
			   (pc & 0x10000000) ? "" : " (invalid)",
			   op);

		nvkm_wr32(device, 0x404490, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x00000080);
		trap &= ~0x00000080;
	}

	if (trap & 0x00000100) {
		u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;

		nvkm_snprintbf(error, sizeof(error), gk104_sked_error, stat);
		nvkm_error(subdev, "SKED: %08x [%s]\n", stat, error);

		if (stat)
			nvkm_wr32(device, 0x407020, 0x40000000);
		nvkm_wr32(device, 0x400108, 0x00000100);
		trap &= ~0x00000100;
	}

	if (trap & 0x01000000) {
		u32 stat = nvkm_rd32(device, 0x400118);
		for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
			u32 mask = 0x00000001 << gpc;
			if (stat & mask) {
				gf100_gr_trap_gpc(gr, gpc);
				nvkm_wr32(device, 0x400118, mask);
				stat &= ~mask;
			}
		}
		nvkm_wr32(device, 0x400108, 0x01000000);
		trap &= ~0x01000000;
	}

	if (trap & 0x02000000) {
		for (rop = 0; rop < gr->rop_nr; rop++) {
			u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
			u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
			nvkm_error(subdev, "ROP%d %08x %08x\n",
				   rop, statz, statc);
			nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
			nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
		}
		nvkm_wr32(device, 0x400108, 0x02000000);
		trap &= ~0x02000000;
	}

	if (trap) {
		nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
		nvkm_wr32(device, 0x400108, trap);
	}
}

static void
gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	nvkm_error(subdev, "%06x - done %08x\n", base,
		   nvkm_rd32(device, base + 0x400));
	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
		   nvkm_rd32(device, base + 0x800),
		   nvkm_rd32(device, base + 0x804),
		   nvkm_rd32(device, base + 0x808),
		   nvkm_rd32(device, base + 0x80c));
	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
		   nvkm_rd32(device, base + 0x810),
		   nvkm_rd32(device, base + 0x814),
		   nvkm_rd32(device, base + 0x818),
		   nvkm_rd32(device, base + 0x81c));
}

void
gf100_gr_ctxctl_debug(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
	u32 gpc;

	gf100_gr_ctxctl_debug_unit(gr, 0x409000);
	for (gpc = 0; gpc < gpcnr; gpc++)
		gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
}

static void
gf100_gr_ctxctl_isr(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x409c18);

	if (!gr->firmware && (stat & 0x00000001)) {
		u32 code = nvkm_rd32(device, 0x409814);
		if (code == E_BAD_FWMTHD) {
			u32 class = nvkm_rd32(device, 0x409808);
			u32 addr = nvkm_rd32(device, 0x40980c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00003ffc);
			u32 data = nvkm_rd32(device, 0x409810);

			nvkm_error(subdev, "FECS MTHD subc %d class %04x "
					   "mthd %04x data %08x\n",
				   subc, class, mthd, data);
		} else {
			nvkm_error(subdev, "FECS ucode error %d\n", code);
		}
		nvkm_wr32(device, 0x409c20, 0x00000001);
		stat &= ~0x00000001;
	}

	if (!gr->firmware && (stat & 0x00080000)) {
		nvkm_error(subdev, "FECS watchdog timeout\n");
		gf100_gr_ctxctl_debug(gr);
		nvkm_wr32(device, 0x409c20, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nvkm_error(subdev, "FECS %08x\n", stat);
		gf100_gr_ctxctl_debug(gr);
		nvkm_wr32(device, 0x409c20, stat);
	}
}

static void
gf100_gr_intr(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nvkm_rd32(device, 0x400708);
	u32 code = nvkm_rd32(device, 0x400110);
	u32 class;
	const char *name = "unknown";
	int chid = -1;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
	if (chan) {
		name = chan->object.client->name;
		chid = chan->chid;
	}

	if (device->card_type < NV_E0 || subc < 4)
		class = nvkm_rd32(device, 0x404200 + (subc * 4));
	else
		class = 0x0000;

	if (stat & 0x00000001) {
		/*
		 * notifier interrupt, only needed for cyclestats
		 * can be safely ignored
		 */
		nvkm_wr32(device, 0x400100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
			nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
				   "subc %d class %04x mthd %04x data %08x\n",
				   chid, inst << 12, name, subc,
				   class, mthd, data);
		}
		nvkm_wr32(device, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
			   "subc %d class %04x mthd %04x data %08x\n",
			   chid, inst << 12, name, subc, class, mthd, data);
		nvkm_wr32(device, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		const struct nvkm_enum *en =
			nvkm_enum_find(nv50_data_error_names, code);
		nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
			   "subc %d class %04x mthd %04x data %08x\n",
			   code, en ? en->name : "", chid, inst << 12,
			   name, subc, class, mthd, data);
		nvkm_wr32(device, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
			   chid, inst << 12, name);
		gf100_gr_trap_intr(gr);
		nvkm_wr32(device, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		gf100_gr_ctxctl_isr(gr);
		nvkm_wr32(device, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nvkm_error(subdev, "intr %08x\n", stat);
		nvkm_wr32(device, 0x400100, stat);
	}

	nvkm_wr32(device, 0x400500, 0x00010001);
	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

static void
gf100_gr_init_fw(struct nvkm_falcon *falcon,
		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
	nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
	nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
}

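/* Upload a context-switch register list to a falcon, run-length encoding
 * consecutive registers as ((count - 1) << 26 | addr) words.
 */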
static void
gf100_gr_init_csdata(struct gf100_gr *gr,
		     const struct gf100_gr_pack *pack,
		     u32 falcon, u32 starstar, u32 base)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_gr_pack *iter;
	const struct gf100_gr_init *init;
	u32 addr = ~0, prev = ~0, xfer = 0;
	u32 star, temp;

	nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
	star = nvkm_rd32(device, falcon + 0x01c4);
	temp = nvkm_rd32(device, falcon + 0x01c4);
	if (temp > star)
		star = temp;
	nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);

	pack_for_each_init(init, iter, pack) {
		u32 head = init->addr - base;
		u32 tail = head + init->count * init->pitch;
		while (head < tail) {
			if (head != prev + 4 || xfer >= 32) {
				if (xfer) {
					u32 data = ((--xfer << 26) | addr);
					nvkm_wr32(device, falcon + 0x01c4, data);
					star += 4;
				}
				addr = head;
				xfer = 0;
			}
			prev = head;
			xfer = xfer + 1;
			head = head + init->pitch;
		}
	}

	nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
	nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
	nvkm_wr32(device, falcon + 0x01c4, star + 4);
}

/* Initialize context from an external (secure or not) firmware */
static int
gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_secboot *sb = device->secboot;
	u32 secboot_mask = 0;
	int ret;

	/* load fuc microcode */
	nvkm_mc_unk260(device, 0);

	/* securely-managed falcons must be reset using secure boot */
	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
	else
		gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);

	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
	else
		gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);

	if (secboot_mask != 0) {
		int ret = nvkm_secboot_reset(sb, secboot_mask);
		if (ret)
			return ret;
	}

	nvkm_mc_unk260(device, 1);

	/* start both of them running */
	nvkm_wr32(device, 0x409840, 0xffffffff);
	nvkm_wr32(device, 0x41a10c, 0x00000000);
	nvkm_wr32(device, 0x40910c, 0x00000000);

	nvkm_falcon_start(gr->gpccs);
	nvkm_falcon_start(gr->fecs);

	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x409800) & 0x00000001)
			break;
	) < 0)
		return -EBUSY;

	gf100_gr_fecs_set_watchdog_timeout(gr, 0x7fffffff);

	/* Determine how much memory is required to store main context image. */
	ret = gf100_gr_fecs_discover_image_size(gr, &gr->size);
	if (ret)
		return ret;

	/* Determine how much memory is required to store ZCULL image. */
	ret = gf100_gr_fecs_discover_zcull_image_size(gr, &gr->size_zcull);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x409840, 0xffffffff);
	nvkm_wr32(device, 0x409500, 0x00000000);
	nvkm_wr32(device, 0x409504, 0x00000025);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x409800))
			break;
	) < 0)
		return -EBUSY;

	if (device->chipset >= 0xe0) {
		nvkm_wr32(device, 0x409800, 0x00000000);
		nvkm_wr32(device, 0x409500, 0x00000001);
		nvkm_wr32(device, 0x409504, 0x00000030);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))
				break;
		) < 0)
			return -EBUSY;

		nvkm_wr32(device, 0x409810, 0xb00095c8);
		nvkm_wr32(device, 0x409800, 0x00000000);
		nvkm_wr32(device, 0x409500, 0x00000001);
		nvkm_wr32(device, 0x409504, 0x00000031);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))
				break;
		) < 0)
			return -EBUSY;

		nvkm_wr32(device, 0x409810, 0x00080420);
		nvkm_wr32(device, 0x409800, 0x00000000);
		nvkm_wr32(device, 0x409500, 0x00000001);
		nvkm_wr32(device, 0x409504, 0x00000032);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800))
				break;
		) < 0)
			return -EBUSY;

		nvkm_wr32(device, 0x409614, 0x00000070);
		nvkm_wr32(device, 0x409614, 0x00000770);
		nvkm_wr32(device, 0x40802c, 0x00000001);
	}

	if (gr->data == NULL) {
		int ret = gf100_grctx_generate(gr);
		if (ret) {
			nvkm_error(subdev, "failed to construct context\n");
			return ret;
		}
	}

	return 0;
}

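/* Initialize context switching using the built-in (non-secure) ucode: load
 * FECS/GPCCS code and data directly, upload the register lists, then start
 * the HUB falcon and wait for it to signal completion in 0x409800.
 */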
static int
gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
{
	const struct gf100_grctx_func *grctx = gr->func->grctx;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	if (!gr->func->fecs.ucode) {
		return -ENOSYS;
	}

	/* load HUB microcode */
	nvkm_mc_unk260(device, 0);
	nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
			      gr->func->fecs.ucode->data.size, 0);
	nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
			      gr->func->fecs.ucode->code.size, 0, 0, false);

	/* load GPC microcode */
	nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
			      gr->func->gpccs.ucode->data.size, 0);
	nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
			      gr->func->gpccs.ucode->code.size, 0, 0, false);
	nvkm_mc_unk260(device, 1);

	/* load register lists */
	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
	gf100_gr_init_csdata(gr, grctx->gpc_0, 0x41a000, 0x000, 0x418000);
	gf100_gr_init_csdata(gr, grctx->gpc_1, 0x41a000, 0x000, 0x418000);
	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);

	/* start HUB ucode running, it'll init the GPCs */
	nvkm_wr32(device, 0x40910c, 0x00000000);
	nvkm_wr32(device, 0x409100, 0x00000002);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x409800) & 0x80000000)
			break;
	) < 0) {
		gf100_gr_ctxctl_debug(gr);
		return -EBUSY;
	}

	gr->size = nvkm_rd32(device, 0x409804);
	if (gr->data == NULL) {
		int ret = gf100_grctx_generate(gr);
		if (ret) {
			nvkm_error(subdev, "failed to construct context\n");
			return ret;
		}
	}

	return 0;
}

int
gf100_gr_init_ctxctl(struct gf100_gr *gr)
{
	int ret;

	if (gr->firmware)
		ret = gf100_gr_init_ctxctl_ext(gr);
	else
		ret = gf100_gr_init_ctxctl_int(gr);

	return ret;
}

void
gf100_gr_oneinit_sm_id(struct gf100_gr *gr)
{
	int tpc, gpc;
	for (tpc = 0; tpc < gr->tpc_max; tpc++) {
		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
			if (tpc < gr->tpc_nr[gpc]) {
				gr->sm[gr->sm_nr].gpc = gpc;
				gr->sm[gr->sm_nr].tpc = tpc;
				gr->sm_nr++;
			}
		}
	}
}

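/* Derive the screen tile row offset from the TPC count and build the
 * tile -> GPC mapping, distributing tiles in proportion to each GPC's
 * TPC count.
 */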
void
gf100_gr_oneinit_tiles(struct gf100_gr *gr)
{
	static const u8 primes[] = {
		3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61
	};
	int init_frac[GPC_MAX], init_err[GPC_MAX], run_err[GPC_MAX], i, j;
	u32 mul_factor, comm_denom;
	u8 gpc_map[GPC_MAX];
	bool sorted;

	switch (gr->tpc_total) {
	case 15: gr->screen_tile_row_offset = 0x06; break;
	case 14: gr->screen_tile_row_offset = 0x05; break;
	case 13: gr->screen_tile_row_offset = 0x02; break;
	case 11: gr->screen_tile_row_offset = 0x07; break;
	case 10: gr->screen_tile_row_offset = 0x06; break;
	case  7:
	case  5: gr->screen_tile_row_offset = 0x01; break;
	case  3: gr->screen_tile_row_offset = 0x02; break;
	case  2:
	case  1: gr->screen_tile_row_offset = 0x01; break;
	default: gr->screen_tile_row_offset = 0x03;
		for (i = 0; i < ARRAY_SIZE(primes); i++) {
			if (gr->tpc_total % primes[i]) {
				gr->screen_tile_row_offset = primes[i];
				break;
			}
		}
		break;
	}

	/* Sort GPCs by TPC count, highest-to-lowest. */
	for (i = 0; i < gr->gpc_nr; i++)
		gpc_map[i] = i;
	sorted = false;

	while (!sorted) {
		for (sorted = true, i = 0; i < gr->gpc_nr - 1; i++) {
			if (gr->tpc_nr[gpc_map[i + 1]] >
			    gr->tpc_nr[gpc_map[i + 0]]) {
				u8 swap = gpc_map[i];
				gpc_map[i + 0] = gpc_map[i + 1];
				gpc_map[i + 1] = swap;
				sorted = false;
			}
		}
	}

	/* Determine tile->GPC mapping */
	mul_factor = gr->gpc_nr * gr->tpc_max;
	if (mul_factor & 1)
		mul_factor = 2;
	else
		mul_factor = 1;

	comm_denom = gr->gpc_nr * gr->tpc_max * mul_factor;

	for (i = 0; i < gr->gpc_nr; i++) {
		init_frac[i] = gr->tpc_nr[gpc_map[i]] * gr->gpc_nr * mul_factor;
		init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2;
		run_err[i] = init_frac[i] + init_err[i];
	}

	for (i = 0; i < gr->tpc_total;) {
		for (j = 0; j < gr->gpc_nr; j++) {
			if ((run_err[j] * 2) >= comm_denom) {
				gr->tile[i++] = gpc_map[j];
				run_err[j] += init_frac[j] - comm_denom;
			} else {
				run_err[j] += init_frac[j];
			}
		}
	}
}

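/* One-time setup: create the FECS/GPCCS falcons and probe the GPC/TPC/PPC
 * topology from PGRAPH configuration registers.
 */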
static int
gf100_gr_oneinit(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i, j;
	int ret;

	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
	if (ret)
		return ret;

	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
	if (ret)
		return ret;

	nvkm_pmu_pgob(device->pmu, false);

	gr->rop_nr = gr->func->rops(gr);
	gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
	for (i = 0; i < gr->gpc_nr; i++) {
		gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
		gr->tpc_max = max(gr->tpc_max, gr->tpc_nr[i]);
		gr->tpc_total += gr->tpc_nr[i];
		gr->ppc_nr[i] = gr->func->ppc_nr;
		for (j = 0; j < gr->ppc_nr[i]; j++) {
			gr->ppc_tpc_mask[i][j] =
				nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
			if (gr->ppc_tpc_mask[i][j] == 0)
				continue;
			gr->ppc_mask[i] |= (1 << j);
			gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]);
			if (gr->ppc_tpc_min == 0 ||
			    gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j])
				gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j];
			if (gr->ppc_tpc_max < gr->ppc_tpc_nr[i][j])
				gr->ppc_tpc_max = gr->ppc_tpc_nr[i][j];
		}
	}

	memset(gr->tile, 0xff, sizeof(gr->tile));
	gr->func->oneinit_tiles(gr);
	gr->func->oneinit_sm_id(gr);
	return 0;
}

static int
gf100_gr_init_(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct nvkm_subdev *subdev = &base->engine.subdev;
	u32 ret;

	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);

	ret = nvkm_falcon_get(gr->fecs, subdev);
	if (ret)
		return ret;

	ret = nvkm_falcon_get(gr->gpccs, subdev);
	if (ret)
		return ret;

	return gr->func->init(gr);
}

static int
gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
{
	struct gf100_gr *gr = gf100_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	nvkm_falcon_put(gr->gpccs, subdev);
	nvkm_falcon_put(gr->fecs, subdev);
	return 0;
}

void
gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
	kfree(fuc->data);
	fuc->data = NULL;
}

static void
gf100_gr_dtor_init(struct gf100_gr_pack *pack)
{
	vfree(pack);
}

void *
gf100_gr_dtor(struct nvkm_gr *base)
{
	struct gf100_gr *gr = gf100_gr(base);

	if (gr->func->dtor)
		gr->func->dtor(gr);
	kfree(gr->data);

	nvkm_falcon_del(&gr->gpccs);
	nvkm_falcon_del(&gr->fecs);

	gf100_gr_dtor_fw(&gr->fuc409c);
	gf100_gr_dtor_fw(&gr->fuc409d);
	gf100_gr_dtor_fw(&gr->fuc41ac);
	gf100_gr_dtor_fw(&gr->fuc41ad);

	gf100_gr_dtor_init(gr->fuc_bundle);
	gf100_gr_dtor_init(gr->fuc_method);
	gf100_gr_dtor_init(gr->fuc_sw_ctx);
	gf100_gr_dtor_init(gr->fuc_sw_nonctx);

	return gr;
}

static const struct nvkm_gr_func
gf100_gr_ = {
	.dtor = gf100_gr_dtor,
	.oneinit = gf100_gr_oneinit,
	.init = gf100_gr_init_,
	.fini = gf100_gr_fini_,
	.intr = gf100_gr_intr,
	.units = gf100_gr_units,
	.chan_new = gf100_gr_chan_new,
	.object_get = gf100_gr_object_get,
	.chsw_load = gf100_gr_chsw_load,
};

int
gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
			struct gf100_gr_fuc *fuc, int ret)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char f[32];

	/* see if this firmware has a legacy path */
	if (!strcmp(fwname, "fecs_inst"))
		fwname = "fuc409c";
	else if (!strcmp(fwname, "fecs_data"))
		fwname = "fuc409d";
	else if (!strcmp(fwname, "gpccs_inst"))
		fwname = "fuc41ac";
	else if (!strcmp(fwname, "gpccs_data"))
		fwname = "fuc41ad";
	else {
		/* nope, let's just return the error we got */
		nvkm_error(subdev, "failed to load %s\n", fwname);
		return ret;
	}

	/* yes, try to load from the legacy path */
	nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);

	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
	ret = request_firmware(&fw, f, device->dev);
	if (ret) {
		snprintf(f, sizeof(f), "nouveau/%s", fwname);
		ret = request_firmware(&fw, f, device->dev);
		if (ret) {
			nvkm_error(subdev, "failed to load %s\n", fwname);
			return ret;
		}
	}

	fuc->size = fw->size;
	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
	release_firmware(fw);
	return (fuc->data != NULL) ? 0 : -ENOMEM;
}

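/* Fetch a named firmware image, falling back to the legacy nouveau/ paths
 * handled by gf100_gr_ctor_fw_legacy() if the standard lookup fails.
 */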
int
gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct gf100_gr *gr)
{
	gr->func = func;
	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
				    func->fecs.ucode == NULL);

	return nvkm_gr_ctor(&gf100_gr_, device, index,
			    gr->firmware || func->fecs.ucode != NULL,
			    &gr->base);
}

int
gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct nvkm_gr **pgr)
{
	struct gf100_gr *gr;
	int ret;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;

	ret = gf100_gr_ctor(func, device, index, gr);
	if (ret)
		return ret;

	if (gr->firmware) {
		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
			return -ENODEV;
	}

	return 0;
}

void
gf100_gr_init_400054(struct gf100_gr *gr)
{
	nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
}

void
gf100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
}

void
gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int gpc, int tpc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
}

void
gf100_gr_init_419eb4(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
}

void
gf100_gr_init_419cc0(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int gpc, tpc;

	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++)
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
	}
}

void
gf100_gr_init_40601c(struct gf100_gr *gr)
{
	nvkm_wr32(gr->base.engine.subdev.device, 0x40601c, 0xc0000000);
}

void
gf100_gr_init_fecs_exceptions(struct gf100_gr *gr)
{
	const u32 data = gr->firmware ? 0x000e0000 : 0x000e0001;
	nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, data);
}
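/* Mirror the relevant FB/MMU configuration into the GR engine's per-GPC
 * MMU registers, and point them at the framebuffer's MMU write/read
 * buffers.
 */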
void
gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
	nvkm_wr32(device, 0x4188a4, 0x03000000);
	nvkm_wr32(device, 0x418888, 0x00000000);
	nvkm_wr32(device, 0x41888c, 0x00000000);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);
	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(fb->mmu_wr) >> 8);
	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
}

void
gf100_gr_init_num_active_ltcs(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
}

void
gf100_gr_init_zcull(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	const u8 tile_nr = ALIGN(gr->tpc_total, 32);
	u8 bank[GPC_MAX] = {}, gpc, i, j;
	u32 data;

	for (i = 0; i < tile_nr; i += 8) {
		for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
			data |= bank[gr->tile[i + j]] << (j * 4);
			bank[gr->tile[i + j]]++;
		}
		nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
	}

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			  gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
}

void
gf100_gr_init_vsc_stream_master(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
}

int
gf100_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int gpc, tpc, rop;

	if (gr->func->init_419bd8)
		gr->func->init_419bd8(gr);

	gr->func->init_gpc_mmu(gr);

	if (gr->fuc_sw_nonctx)
		gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
	else
		gf100_gr_mmio(gr, gr->func->mmio);

	gf100_gr_wait_idle(gr);

	if (gr->func->init_r405a14)
		gr->func->init_r405a14(gr);

	if (gr->func->clkgate_pack)
		nvkm_therm_clkgate_init(device->therm, gr->func->clkgate_pack);

	if (gr->func->init_bios)
		gr->func->init_bios(gr);

	gr->func->init_vsc_stream_master(gr);
	gr->func->init_zcull(gr);
	gr->func->init_num_active_ltcs(gr);
	if (gr->func->init_rop_active_fbps)
		gr->func->init_rop_active_fbps(gr);
	if (gr->func->init_bios_2)
		gr->func->init_bios_2(gr);
	if (gr->func->init_swdx_pes_mask)
		gr->func->init_swdx_pes_mask(gr);

	nvkm_wr32(device, 0x400500, 0x00010001);

	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400124, 0x00000002);

	gr->func->init_fecs_exceptions(gr);
	if (gr->func->init_ds_hww_esr_2)
		gr->func->init_ds_hww_esr_2(gr);
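	/* The 0xc0000000 writes below appear to reset and enable hardware
	 * warning (HWW ESR) reporting in each fixed-function unit; the
	 * corresponding interrupt/error sources are unmasked further down.
	 */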
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);
	nvkm_wr32(device, 0x408030, 0xc0000000);

	if (gr->func->init_40601c)
		gr->func->init_40601c(gr);

	nvkm_wr32(device, 0x404490, 0xc0000000);
	nvkm_wr32(device, 0x406018, 0xc0000000);

	if (gr->func->init_sked_hww_esr)
		gr->func->init_sked_hww_esr(gr);

	nvkm_wr32(device, 0x405840, 0xc0000000);
	nvkm_wr32(device, 0x405844, 0x00ffffff);

	if (gr->func->init_419cc0)
		gr->func->init_419cc0(gr);
	if (gr->func->init_419eb4)
		gr->func->init_419eb4(gr);
	if (gr->func->init_419c9c)
		gr->func->init_419c9c(gr);

	if (gr->func->init_ppc_exceptions)
		gr->func->init_ppc_exceptions(gr);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			if (gr->func->init_tex_hww_esr)
				gr->func->init_tex_hww_esr(gr, gpc, tpc);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			if (gr->func->init_504430)
				gr->func->init_504430(gr, gpc, tpc);
			gr->func->init_shader_exceptions(gr, gpc, tpc);
		}
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}

	for (rop = 0; rop < gr->rop_nr; rop++) {
		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	if (gr->func->init_400054)
		gr->func->init_400054(gr);

	gf100_gr_zbc_init(gr);

	if (gr->func->init_4188a4)
		gr->func->init_4188a4(gr);

	return gf100_gr_init_ctxctl(gr);
}

#include "fuc/hubgf100.fuc3.h"

struct gf100_gr_ucode
gf100_gr_fecs_ucode = {
	.code.data = gf100_grhub_code,
	.code.size = sizeof(gf100_grhub_code),
	.data.data = gf100_grhub_data,
	.data.size = sizeof(gf100_grhub_data),
};

#include "fuc/gpcgf100.fuc3.h"

struct gf100_gr_ucode
gf100_gr_gpccs_ucode = {
	.code.data = gf100_grgpc_code,
	.code.size = sizeof(gf100_grgpc_code),
	.data.data = gf100_grgpc_data,
	.data.size = sizeof(gf100_grgpc_data),
};
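/* GF100-specific implementation of the gf100_gr_func interface; later
 * chipsets reuse most of these entry points and override only what
 * differs on their hardware.
 */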
static const struct gf100_gr_func
gf100_gr = {
	.oneinit_tiles = gf100_gr_oneinit_tiles,
	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
	.init_zcull = gf100_gr_init_zcull,
	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
	.init_40601c = gf100_gr_init_40601c,
	.init_419cc0 = gf100_gr_init_419cc0,
	.init_419eb4 = gf100_gr_init_419eb4,
	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
	.init_400054 = gf100_gr_init_400054,
	.trap_mp = gf100_gr_trap_mp,
	.mmio = gf100_gr_pack_mmio,
	.fecs.ucode = &gf100_gr_fecs_ucode,
	.gpccs.ucode = &gf100_gr_gpccs_ucode,
	.rops = gf100_gr_rops,
	.grctx = &gf100_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
		{ -1, -1, FERMI_A, &gf100_fermi },
		{ -1, -1, FERMI_COMPUTE_A },
		{}
	}
};

int
gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(&gf100_gr, device, index, pgr);
}