/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>

void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy = !!(stat & 0x80000000);
	status->faulted = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id = (stat & 0x0fff0000) >> 16;
	status->chsw = !!(stat & 0x00008000);
	status->save = !!(stat & 0x00004000);
	status->load = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id = (stat & 0x00000fff);
	status->chan = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		     void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	if (oclass->engn == &fifo->func->chan) {
		const struct gk104_fifo_chan_user *user = oclass->engn;
		return user->ctor(fifo, oclass, argv, argc, pobject);
	} else
	if (oclass->engn == &fifo->func->user) {
		const struct gk104_fifo_user_user *user = oclass->engn;
		return user->ctor(oclass, argv, argc, pobject);
	}
	WARN_ON(1);
	return -EINVAL;
}

int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     struct nvkm_oclass *oclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	if (fifo->func->user.ctor && c++ == index) {
		oclass->base = fifo->func->user.user;
		oclass->engn = &fifo->func->user;
		return 0;
	}

	if (fifo->func->chan.ctor && c++ == index) {
		oclass->base = fifo->func->chan.user;
		oclass->engn = &fifo->func->chan;
		return 0;
	}

	return c;
}

void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int target;

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
}

void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	func->commit(fifo, runl, mem, nr);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};

void
gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
}

int
gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	return hweight32(nvkm_rd32(device, 0x000204));
}

const struct gk104_fifo_pbdma_func
gk104_fifo_pbdma = {
	.nr = gk104_fifo_pbdma_nr,
	.init = gk104_fifo_pbdma_init,
};

static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_update(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32 runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_fifo_kevent(&fifo->base, chid);
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.index);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.index) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger a MMU fault for the engine.
	 *
	 * It's not clear why this is needed, but nvgpu does something
	 * similar, and it makes recovery from CTXSW_TIMEOUT a lot more
	 * reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

static void
gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char ct[8] = "HUB/", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->fault.access, info->access);

	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_bar_bar1_reset(device);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_bar_bar2_reset(device);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, ee->data2);
			break;
		}
	}

	if (ee == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while(*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", ee->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : en,
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
		   info->inst, chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ?
		   en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			fifo->func->intr.fault(&fifo->base, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	switch (mthd) {
	case NV_DEVICE_FIFO_RUNLISTS:
		*data = (1ULL << fifo->runlist_nr) - 1;
		return 0;
	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
		if (runl < fifo->runlist_nr) {
			unsigned long engm = fifo->runlist[runl].engm;
			struct nvkm_engine *engine;
			*data = 0;
			for_each_set_bit(engn, &engm, fifo->engine_nr) {
				if ((engine = fifo->engine[engn].engine))
					*data |= BIT_ULL(engine->subdev.index);
			}
			return 0;
		}
	}
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	fifo->func->pbdma->init(fifo);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	if (fifo->func->pbdma->init_timeout)
		fifo->func->pbdma->init_timeout(fifo);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.info = gk104_fifo_info,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.fault = gk104_fifo_fault,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
	.class_new = gk104_fifo_class_new,
};

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

const struct nvkm_enum
gk104_fifo_fault_access[] = {
	{ 0x0, "READ" },
	{ 0x1, "WRITE" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.intr.fault = gf100_fifo_intr_fault,
	.pbdma = &gk104_fifo_pbdma,
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.runlist = &gk104_fifo_runlist,
	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}