1 /* 2 * Copyright 2012 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Ben Skeggs 23 */ 24 #include "gk104.h" 25 #include "cgrp.h" 26 #include "changk104.h" 27 28 #include <core/client.h> 29 #include <core/gpuobj.h> 30 #include <subdev/bar.h> 31 #include <subdev/fault.h> 32 #include <subdev/timer.h> 33 #include <subdev/top.h> 34 #include <engine/sw.h> 35 36 #include <nvif/class.h> 37 #include <nvif/cl0080.h> 38 39 struct gk104_fifo_engine_status { 40 bool busy; 41 bool faulted; 42 bool chsw; 43 bool save; 44 bool load; 45 struct { 46 bool tsg; 47 u32 id; 48 } prev, next, *chan; 49 }; 50 51 static void 52 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn, 53 struct gk104_fifo_engine_status *status) 54 { 55 struct nvkm_engine *engine = fifo->engine[engn].engine; 56 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 57 struct nvkm_device *device = subdev->device; 58 u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08)); 59 60 status->busy = !!(stat & 0x80000000); 61 status->faulted = !!(stat & 0x40000000); 62 status->next.tsg = !!(stat & 0x10000000); 63 status->next.id = (stat & 0x0fff0000) >> 16; 64 status->chsw = !!(stat & 0x00008000); 65 status->save = !!(stat & 0x00004000); 66 status->load = !!(stat & 0x00002000); 67 status->prev.tsg = !!(stat & 0x00001000); 68 status->prev.id = (stat & 0x00000fff); 69 status->chan = NULL; 70 71 if (status->busy && status->chsw) { 72 if (status->load && status->save) { 73 if (engine && nvkm_engine_chsw_load(engine)) 74 status->chan = &status->next; 75 else 76 status->chan = &status->prev; 77 } else 78 if (status->load) { 79 status->chan = &status->next; 80 } else { 81 status->chan = &status->prev; 82 } 83 } else 84 if (status->load) { 85 status->chan = &status->prev; 86 } 87 88 nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d " 89 "save %d load %d %sid %d%s-> %sid %d%s\n", 90 engn, status->busy, status->faulted, 91 status->chsw, status->save, status->load, 92 status->prev.tsg ? "tsg" : "ch", status->prev.id, 93 status->chan == &status->prev ? "*" : " ", 94 status->next.tsg ? "tsg" : "ch", status->next.id, 95 status->chan == &status->next ? "*" : " "); 96 } 97 98 static int 99 gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, 100 void *argv, u32 argc, struct nvkm_object **pobject) 101 { 102 struct gk104_fifo *fifo = gk104_fifo(base); 103 if (oclass->engn == &fifo->func->chan) { 104 const struct gk104_fifo_chan_user *user = oclass->engn; 105 return user->ctor(fifo, oclass, argv, argc, pobject); 106 } else 107 if (oclass->engn == &fifo->func->user) { 108 const struct gk104_fifo_user_user *user = oclass->engn; 109 return user->ctor(oclass, argv, argc, pobject); 110 } 111 WARN_ON(1); 112 return -EINVAL; 113 } 114 115 static int 116 gk104_fifo_class_get(struct nvkm_fifo *base, int index, 117 struct nvkm_oclass *oclass) 118 { 119 struct gk104_fifo *fifo = gk104_fifo(base); 120 int c = 0; 121 122 if (fifo->func->user.ctor && c++ == index) { 123 oclass->base = fifo->func->user.user; 124 oclass->engn = &fifo->func->user; 125 return 0; 126 } 127 128 if (fifo->func->chan.ctor && c++ == index) { 129 oclass->base = fifo->func->chan.user; 130 oclass->engn = &fifo->func->chan; 131 return 0; 132 } 133 134 return c; 135 } 136 137 static void 138 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo) 139 { 140 struct nvkm_device *device = fifo->engine.subdev.device; 141 nvkm_mask(device, 0x002140, 0x80000000, 0x00000000); 142 } 143 144 static void 145 gk104_fifo_uevent_init(struct nvkm_fifo *fifo) 146 { 147 struct nvkm_device *device = fifo->engine.subdev.device; 148 nvkm_mask(device, 0x002140, 0x80000000, 0x80000000); 149 } 150 151 void 152 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) 153 { 154 const struct gk104_fifo_runlist_func *func = fifo->func->runlist; 155 struct gk104_fifo_chan *chan; 156 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 157 struct nvkm_device *device = subdev->device; 158 struct nvkm_memory *mem; 159 struct nvkm_fifo_cgrp *cgrp; 160 int nr = 0; 161 int target; 162 163 mutex_lock(&subdev->mutex); 164 mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; 165 fifo->runlist[runl].next = !fifo->runlist[runl].next; 166 167 nvkm_kmap(mem); 168 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { 169 func->chan(chan, mem, nr++ * func->size); 170 } 171 172 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { 173 func->cgrp(cgrp, mem, nr++ * func->size); 174 list_for_each_entry(chan, &cgrp->chan, head) { 175 func->chan(chan, mem, nr++ * func->size); 176 } 177 } 178 nvkm_done(mem); 179 180 switch (nvkm_memory_target(mem)) { 181 case NVKM_MEM_TARGET_VRAM: target = 0; break; 182 case NVKM_MEM_TARGET_NCOH: target = 3; break; 183 default: 184 WARN_ON(1); 185 goto unlock; 186 } 187 188 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | 189 (target << 28)); 190 nvkm_wr32(device, 0x002274, (runl << 20) | nr); 191 192 if (nvkm_msec(device, 2000, 193 if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000)) 194 break; 195 ) < 0) 196 nvkm_error(subdev, "runlist %d update timeout\n", runl); 197 unlock: 198 mutex_unlock(&subdev->mutex); 199 } 200 201 void 202 gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) 203 { 204 struct nvkm_fifo_cgrp *cgrp = chan->cgrp; 205 mutex_lock(&fifo->base.engine.subdev.mutex); 206 if (!list_empty(&chan->head)) { 207 list_del_init(&chan->head); 208 if (cgrp && !--cgrp->chan_nr) 209 list_del_init(&cgrp->head); 210 } 211 mutex_unlock(&fifo->base.engine.subdev.mutex); 212 } 213 214 void 215 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) 216 { 217 struct nvkm_fifo_cgrp *cgrp = chan->cgrp; 218 mutex_lock(&fifo->base.engine.subdev.mutex); 219 if (cgrp) { 220 if (!cgrp->chan_nr++) 221 list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp); 222 list_add_tail(&chan->head, &cgrp->chan); 223 } else { 224 list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); 225 } 226 mutex_unlock(&fifo->base.engine.subdev.mutex); 227 } 228 229 void 230 gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan, 231 struct nvkm_memory *memory, u32 offset) 232 { 233 nvkm_wo32(memory, offset + 0, chan->base.chid); 234 nvkm_wo32(memory, offset + 4, 0x00000000); 235 } 236 237 const struct gk104_fifo_runlist_func 238 gk104_fifo_runlist = { 239 .size = 8, 240 .chan = gk104_fifo_runlist_chan, 241 }; 242 243 static void 244 gk104_fifo_recover_work(struct work_struct *w) 245 { 246 struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work); 247 struct nvkm_device *device = fifo->base.engine.subdev.device; 248 struct nvkm_engine *engine; 249 unsigned long flags; 250 u32 engm, runm, todo; 251 int engn, runl; 252 253 spin_lock_irqsave(&fifo->base.lock, flags); 254 runm = fifo->recover.runm; 255 engm = fifo->recover.engm; 256 fifo->recover.engm = 0; 257 fifo->recover.runm = 0; 258 spin_unlock_irqrestore(&fifo->base.lock, flags); 259 260 nvkm_mask(device, 0x002630, runm, runm); 261 262 for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) { 263 if ((engine = fifo->engine[engn].engine)) { 264 nvkm_subdev_fini(&engine->subdev, false); 265 WARN_ON(nvkm_subdev_init(&engine->subdev)); 266 } 267 } 268 269 for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl)) 270 gk104_fifo_runlist_commit(fifo, runl); 271 272 nvkm_wr32(device, 0x00262c, runm); 273 nvkm_mask(device, 0x002630, runm, 0x00000000); 274 } 275 276 static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn); 277 278 static void 279 gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl) 280 { 281 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 282 struct nvkm_device *device = subdev->device; 283 const u32 runm = BIT(runl); 284 285 assert_spin_locked(&fifo->base.lock); 286 if (fifo->recover.runm & runm) 287 return; 288 fifo->recover.runm |= runm; 289 290 /* Block runlist to prevent channel assignment(s) from changing. */ 291 nvkm_mask(device, 0x002630, runm, runm); 292 293 /* Schedule recovery. */ 294 nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl); 295 schedule_work(&fifo->recover.work); 296 } 297 298 static struct gk104_fifo_chan * 299 gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid) 300 { 301 struct gk104_fifo_chan *chan; 302 struct nvkm_fifo_cgrp *cgrp; 303 304 list_for_each_entry(chan, &fifo->runlist[runl].chan, head) { 305 if (chan->base.chid == chid) { 306 list_del_init(&chan->head); 307 return chan; 308 } 309 } 310 311 list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) { 312 if (cgrp->id == chid) { 313 chan = list_first_entry(&cgrp->chan, typeof(*chan), head); 314 list_del_init(&chan->head); 315 if (!--cgrp->chan_nr) 316 list_del_init(&cgrp->head); 317 return chan; 318 } 319 } 320 321 return NULL; 322 } 323 324 static void 325 gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid) 326 { 327 struct gk104_fifo *fifo = gk104_fifo(base); 328 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 329 struct nvkm_device *device = subdev->device; 330 const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08)); 331 const u32 runl = (stat & 0x000f0000) >> 16; 332 const bool used = (stat & 0x00000001); 333 unsigned long engn, engm = fifo->runlist[runl].engm; 334 struct gk104_fifo_chan *chan; 335 336 assert_spin_locked(&fifo->base.lock); 337 if (!used) 338 return; 339 340 /* Lookup SW state for channel, and mark it as dead. */ 341 chan = gk104_fifo_recover_chid(fifo, runl, chid); 342 if (chan) { 343 chan->killed = true; 344 nvkm_fifo_kevent(&fifo->base, chid); 345 } 346 347 /* Disable channel. */ 348 nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800); 349 nvkm_warn(subdev, "channel %d: killed\n", chid); 350 351 /* Block channel assignments from changing during recovery. */ 352 gk104_fifo_recover_runl(fifo, runl); 353 354 /* Schedule recovery for any engines the channel is on. */ 355 for_each_set_bit(engn, &engm, fifo->engine_nr) { 356 struct gk104_fifo_engine_status status; 357 gk104_fifo_engine_status(fifo, engn, &status); 358 if (!status.chan || status.chan->id != chid) 359 continue; 360 gk104_fifo_recover_engn(fifo, engn); 361 } 362 } 363 364 static void 365 gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn) 366 { 367 struct nvkm_engine *engine = fifo->engine[engn].engine; 368 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 369 struct nvkm_device *device = subdev->device; 370 const u32 runl = fifo->engine[engn].runl; 371 const u32 engm = BIT(engn); 372 struct gk104_fifo_engine_status status; 373 int mmui = -1; 374 375 assert_spin_locked(&fifo->base.lock); 376 if (fifo->recover.engm & engm) 377 return; 378 fifo->recover.engm |= engm; 379 380 /* Block channel assignments from changing during recovery. */ 381 gk104_fifo_recover_runl(fifo, runl); 382 383 /* Determine which channel (if any) is currently on the engine. */ 384 gk104_fifo_engine_status(fifo, engn, &status); 385 if (status.chan) { 386 /* The channel is not longer viable, kill it. */ 387 gk104_fifo_recover_chan(&fifo->base, status.chan->id); 388 } 389 390 /* Determine MMU fault ID for the engine, if we're not being 391 * called from the fault handler already. 392 */ 393 if (!status.faulted && engine) { 394 mmui = nvkm_top_fault_id(device, engine->subdev.index); 395 if (mmui < 0) { 396 const struct nvkm_enum *en = fifo->func->fault.engine; 397 for (; en && en->name; en++) { 398 if (en->data2 == engine->subdev.index) { 399 mmui = en->value; 400 break; 401 } 402 } 403 } 404 WARN_ON(mmui < 0); 405 } 406 407 /* Trigger a MMU fault for the engine. 408 * 409 * No good idea why this is needed, but nvgpu does something similar, 410 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable. 411 */ 412 if (mmui >= 0) { 413 nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui); 414 415 /* Wait for fault to trigger. */ 416 nvkm_msec(device, 2000, 417 gk104_fifo_engine_status(fifo, engn, &status); 418 if (status.faulted) 419 break; 420 ); 421 422 /* Release MMU fault trigger, and ACK the fault. */ 423 nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000); 424 nvkm_wr32(device, 0x00259c, BIT(mmui)); 425 nvkm_wr32(device, 0x002100, 0x10000000); 426 } 427 428 /* Schedule recovery. */ 429 nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn); 430 schedule_work(&fifo->recover.work); 431 } 432 433 static void 434 gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) 435 { 436 struct gk104_fifo *fifo = gk104_fifo(base); 437 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 438 struct nvkm_device *device = subdev->device; 439 const struct nvkm_enum *er, *ee, *ec, *ea; 440 struct nvkm_engine *engine = NULL; 441 struct nvkm_fifo_chan *chan; 442 unsigned long flags; 443 char ct[8] = "HUB/", en[16] = ""; 444 int engn; 445 446 er = nvkm_enum_find(fifo->func->fault.reason, info->reason); 447 ee = nvkm_enum_find(fifo->func->fault.engine, info->engine); 448 if (info->hub) { 449 ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client); 450 } else { 451 ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client); 452 snprintf(ct, sizeof(ct), "GPC%d/", info->gpc); 453 } 454 ea = nvkm_enum_find(fifo->func->fault.access, info->access); 455 456 if (ee && ee->data2) { 457 switch (ee->data2) { 458 case NVKM_SUBDEV_BAR: 459 nvkm_mask(device, 0x001704, 0x00000000, 0x00000000); 460 break; 461 case NVKM_SUBDEV_INSTMEM: 462 nvkm_mask(device, 0x001714, 0x00000000, 0x00000000); 463 break; 464 case NVKM_ENGINE_IFB: 465 nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); 466 break; 467 default: 468 engine = nvkm_device_engine(device, ee->data2); 469 break; 470 } 471 } 472 473 if (ee == NULL) { 474 enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine); 475 if (engidx < NVKM_SUBDEV_NR) { 476 const char *src = nvkm_subdev_name[engidx]; 477 char *dst = en; 478 do { 479 *dst++ = toupper(*src++); 480 } while(*src); 481 engine = nvkm_device_engine(device, engidx); 482 } 483 } else { 484 snprintf(en, sizeof(en), "%s", ee->name); 485 } 486 487 spin_lock_irqsave(&fifo->base.lock, flags); 488 chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst); 489 490 nvkm_error(subdev, 491 "fault %02x [%s] at %016llx engine %02x [%s] client %02x " 492 "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n", 493 info->access, ea ? ea->name : "", info->addr, 494 info->engine, ee ? ee->name : en, 495 info->client, ct, ec ? ec->name : "", 496 info->reason, er ? er->name : "", chan ? chan->chid : -1, 497 info->inst, chan ? chan->object.client->name : "unknown"); 498 499 /* Kill the channel that caused the fault. */ 500 if (chan) 501 gk104_fifo_recover_chan(&fifo->base, chan->chid); 502 503 /* Channel recovery will probably have already done this for the 504 * correct engine(s), but just in case we can't find the channel 505 * information... 506 */ 507 for (engn = 0; engn < fifo->engine_nr && engine; engn++) { 508 if (fifo->engine[engn].engine == engine) { 509 gk104_fifo_recover_engn(fifo, engn); 510 break; 511 } 512 } 513 514 spin_unlock_irqrestore(&fifo->base.lock, flags); 515 } 516 517 static const struct nvkm_enum 518 gk104_fifo_bind_reason[] = { 519 { 0x01, "BIND_NOT_UNBOUND" }, 520 { 0x02, "SNOOP_WITHOUT_BAR1" }, 521 { 0x03, "UNBIND_WHILE_RUNNING" }, 522 { 0x05, "INVALID_RUNLIST" }, 523 { 0x06, "INVALID_CTX_TGT" }, 524 { 0x0b, "UNBIND_WHILE_PARKED" }, 525 {} 526 }; 527 528 static void 529 gk104_fifo_intr_bind(struct gk104_fifo *fifo) 530 { 531 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 532 struct nvkm_device *device = subdev->device; 533 u32 intr = nvkm_rd32(device, 0x00252c); 534 u32 code = intr & 0x000000ff; 535 const struct nvkm_enum *en = 536 nvkm_enum_find(gk104_fifo_bind_reason, code); 537 538 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 539 } 540 541 static const struct nvkm_enum 542 gk104_fifo_sched_reason[] = { 543 { 0x0a, "CTXSW_TIMEOUT" }, 544 {} 545 }; 546 547 static void 548 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) 549 { 550 struct nvkm_device *device = fifo->base.engine.subdev.device; 551 unsigned long flags, engm = 0; 552 u32 engn; 553 554 /* We need to ACK the SCHED_ERROR here, and prevent it reasserting, 555 * as MMU_FAULT cannot be triggered while it's pending. 556 */ 557 spin_lock_irqsave(&fifo->base.lock, flags); 558 nvkm_mask(device, 0x002140, 0x00000100, 0x00000000); 559 nvkm_wr32(device, 0x002100, 0x00000100); 560 561 for (engn = 0; engn < fifo->engine_nr; engn++) { 562 struct gk104_fifo_engine_status status; 563 564 gk104_fifo_engine_status(fifo, engn, &status); 565 if (!status.busy || !status.chsw) 566 continue; 567 568 engm |= BIT(engn); 569 } 570 571 for_each_set_bit(engn, &engm, fifo->engine_nr) 572 gk104_fifo_recover_engn(fifo, engn); 573 574 nvkm_mask(device, 0x002140, 0x00000100, 0x00000100); 575 spin_unlock_irqrestore(&fifo->base.lock, flags); 576 } 577 578 static void 579 gk104_fifo_intr_sched(struct gk104_fifo *fifo) 580 { 581 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 582 struct nvkm_device *device = subdev->device; 583 u32 intr = nvkm_rd32(device, 0x00254c); 584 u32 code = intr & 0x000000ff; 585 const struct nvkm_enum *en = 586 nvkm_enum_find(gk104_fifo_sched_reason, code); 587 588 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : ""); 589 590 switch (code) { 591 case 0x0a: 592 gk104_fifo_intr_sched_ctxsw(fifo); 593 break; 594 default: 595 break; 596 } 597 } 598 599 static void 600 gk104_fifo_intr_chsw(struct gk104_fifo *fifo) 601 { 602 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 603 struct nvkm_device *device = subdev->device; 604 u32 stat = nvkm_rd32(device, 0x00256c); 605 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 606 nvkm_wr32(device, 0x00256c, stat); 607 } 608 609 static void 610 gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) 611 { 612 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 613 struct nvkm_device *device = subdev->device; 614 u32 stat = nvkm_rd32(device, 0x00259c); 615 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 616 } 617 618 static void 619 gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) 620 { 621 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 622 struct nvkm_device *device = subdev->device; 623 u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10)); 624 u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10)); 625 u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10)); 626 u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10)); 627 struct nvkm_fault_data info; 628 629 info.inst = (u64)inst << 12; 630 info.addr = ((u64)vahi << 32) | valo; 631 info.time = 0; 632 info.engine = unit; 633 info.valid = 1; 634 info.gpc = (type & 0x1f000000) >> 24; 635 info.client = (type & 0x00001f00) >> 8; 636 info.access = (type & 0x00000080) >> 7; 637 info.hub = (type & 0x00000040) >> 6; 638 info.reason = (type & 0x000000ff); 639 640 nvkm_fifo_fault(&fifo->base, &info); 641 } 642 643 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { 644 { 0x00000001, "MEMREQ" }, 645 { 0x00000002, "MEMACK_TIMEOUT" }, 646 { 0x00000004, "MEMACK_EXTRA" }, 647 { 0x00000008, "MEMDAT_TIMEOUT" }, 648 { 0x00000010, "MEMDAT_EXTRA" }, 649 { 0x00000020, "MEMFLUSH" }, 650 { 0x00000040, "MEMOP" }, 651 { 0x00000080, "LBCONNECT" }, 652 { 0x00000100, "LBREQ" }, 653 { 0x00000200, "LBACK_TIMEOUT" }, 654 { 0x00000400, "LBACK_EXTRA" }, 655 { 0x00000800, "LBDAT_TIMEOUT" }, 656 { 0x00001000, "LBDAT_EXTRA" }, 657 { 0x00002000, "GPFIFO" }, 658 { 0x00004000, "GPPTR" }, 659 { 0x00008000, "GPENTRY" }, 660 { 0x00010000, "GPCRC" }, 661 { 0x00020000, "PBPTR" }, 662 { 0x00040000, "PBENTRY" }, 663 { 0x00080000, "PBCRC" }, 664 { 0x00100000, "XBARCONNECT" }, 665 { 0x00200000, "METHOD" }, 666 { 0x00400000, "METHODCRC" }, 667 { 0x00800000, "DEVICE" }, 668 { 0x02000000, "SEMAPHORE" }, 669 { 0x04000000, "ACQUIRE" }, 670 { 0x08000000, "PRI" }, 671 { 0x20000000, "NO_CTXSW_SEG" }, 672 { 0x40000000, "PBSEG" }, 673 { 0x80000000, "SIGNATURE" }, 674 {} 675 }; 676 677 static void 678 gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) 679 { 680 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 681 struct nvkm_device *device = subdev->device; 682 u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000)); 683 u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask; 684 u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000)); 685 u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000)); 686 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 687 u32 subc = (addr & 0x00070000) >> 16; 688 u32 mthd = (addr & 0x00003ffc); 689 u32 show = stat; 690 struct nvkm_fifo_chan *chan; 691 unsigned long flags; 692 char msg[128]; 693 694 if (stat & 0x00800000) { 695 if (device->sw) { 696 if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data)) 697 show &= ~0x00800000; 698 } 699 } 700 701 nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008); 702 703 if (show) { 704 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show); 705 chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags); 706 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " 707 "subc %d mthd %04x data %08x\n", 708 unit, show, msg, chid, chan ? chan->inst->addr : 0, 709 chan ? chan->object.client->name : "unknown", 710 subc, mthd, data); 711 nvkm_fifo_chan_put(&fifo->base, flags, &chan); 712 } 713 714 nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat); 715 } 716 717 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { 718 { 0x00000001, "HCE_RE_ILLEGAL_OP" }, 719 { 0x00000002, "HCE_RE_ALIGNB" }, 720 { 0x00000004, "HCE_PRIV" }, 721 { 0x00000008, "HCE_ILLEGAL_MTHD" }, 722 { 0x00000010, "HCE_ILLEGAL_CLASS" }, 723 {} 724 }; 725 726 static void 727 gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) 728 { 729 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 730 struct nvkm_device *device = subdev->device; 731 u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000)); 732 u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask; 733 u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff; 734 char msg[128]; 735 736 if (stat) { 737 nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat); 738 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", 739 unit, stat, msg, chid, 740 nvkm_rd32(device, 0x040150 + (unit * 0x2000)), 741 nvkm_rd32(device, 0x040154 + (unit * 0x2000))); 742 } 743 744 nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat); 745 } 746 747 static void 748 gk104_fifo_intr_runlist(struct gk104_fifo *fifo) 749 { 750 struct nvkm_device *device = fifo->base.engine.subdev.device; 751 u32 mask = nvkm_rd32(device, 0x002a00); 752 while (mask) { 753 int runl = __ffs(mask); 754 wake_up(&fifo->runlist[runl].wait); 755 nvkm_wr32(device, 0x002a00, 1 << runl); 756 mask &= ~(1 << runl); 757 } 758 } 759 760 static void 761 gk104_fifo_intr_engine(struct gk104_fifo *fifo) 762 { 763 nvkm_fifo_uevent(&fifo->base); 764 } 765 766 static void 767 gk104_fifo_intr(struct nvkm_fifo *base) 768 { 769 struct gk104_fifo *fifo = gk104_fifo(base); 770 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 771 struct nvkm_device *device = subdev->device; 772 u32 mask = nvkm_rd32(device, 0x002140); 773 u32 stat = nvkm_rd32(device, 0x002100) & mask; 774 775 if (stat & 0x00000001) { 776 gk104_fifo_intr_bind(fifo); 777 nvkm_wr32(device, 0x002100, 0x00000001); 778 stat &= ~0x00000001; 779 } 780 781 if (stat & 0x00000010) { 782 nvkm_error(subdev, "PIO_ERROR\n"); 783 nvkm_wr32(device, 0x002100, 0x00000010); 784 stat &= ~0x00000010; 785 } 786 787 if (stat & 0x00000100) { 788 gk104_fifo_intr_sched(fifo); 789 nvkm_wr32(device, 0x002100, 0x00000100); 790 stat &= ~0x00000100; 791 } 792 793 if (stat & 0x00010000) { 794 gk104_fifo_intr_chsw(fifo); 795 nvkm_wr32(device, 0x002100, 0x00010000); 796 stat &= ~0x00010000; 797 } 798 799 if (stat & 0x00800000) { 800 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 801 nvkm_wr32(device, 0x002100, 0x00800000); 802 stat &= ~0x00800000; 803 } 804 805 if (stat & 0x01000000) { 806 nvkm_error(subdev, "LB_ERROR\n"); 807 nvkm_wr32(device, 0x002100, 0x01000000); 808 stat &= ~0x01000000; 809 } 810 811 if (stat & 0x08000000) { 812 gk104_fifo_intr_dropped_fault(fifo); 813 nvkm_wr32(device, 0x002100, 0x08000000); 814 stat &= ~0x08000000; 815 } 816 817 if (stat & 0x10000000) { 818 u32 mask = nvkm_rd32(device, 0x00259c); 819 while (mask) { 820 u32 unit = __ffs(mask); 821 gk104_fifo_intr_fault(fifo, unit); 822 nvkm_wr32(device, 0x00259c, (1 << unit)); 823 mask &= ~(1 << unit); 824 } 825 stat &= ~0x10000000; 826 } 827 828 if (stat & 0x20000000) { 829 u32 mask = nvkm_rd32(device, 0x0025a0); 830 while (mask) { 831 u32 unit = __ffs(mask); 832 gk104_fifo_intr_pbdma_0(fifo, unit); 833 gk104_fifo_intr_pbdma_1(fifo, unit); 834 nvkm_wr32(device, 0x0025a0, (1 << unit)); 835 mask &= ~(1 << unit); 836 } 837 stat &= ~0x20000000; 838 } 839 840 if (stat & 0x40000000) { 841 gk104_fifo_intr_runlist(fifo); 842 stat &= ~0x40000000; 843 } 844 845 if (stat & 0x80000000) { 846 nvkm_wr32(device, 0x002100, 0x80000000); 847 gk104_fifo_intr_engine(fifo); 848 stat &= ~0x80000000; 849 } 850 851 if (stat) { 852 nvkm_error(subdev, "INTR %08x\n", stat); 853 nvkm_mask(device, 0x002140, stat, 0x00000000); 854 nvkm_wr32(device, 0x002100, stat); 855 } 856 } 857 858 static void 859 gk104_fifo_fini(struct nvkm_fifo *base) 860 { 861 struct gk104_fifo *fifo = gk104_fifo(base); 862 struct nvkm_device *device = fifo->base.engine.subdev.device; 863 flush_work(&fifo->recover.work); 864 /* allow mmu fault interrupts, even when we're not using fifo */ 865 nvkm_mask(device, 0x002140, 0x10000000, 0x10000000); 866 } 867 868 static int 869 gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data) 870 { 871 struct gk104_fifo *fifo = gk104_fifo(base); 872 switch (mthd) { 873 case NV_DEVICE_FIFO_RUNLISTS: 874 *data = (1ULL << fifo->runlist_nr) - 1; 875 return 0; 876 case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)... 877 NV_DEVICE_FIFO_RUNLIST_ENGINES(63): { 878 int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn; 879 if (runl < fifo->runlist_nr) { 880 unsigned long engm = fifo->runlist[runl].engm; 881 struct nvkm_engine *engine; 882 *data = 0; 883 for_each_set_bit(engn, &engm, fifo->engine_nr) { 884 if ((engine = fifo->engine[engn].engine)) 885 *data |= BIT_ULL(engine->subdev.index); 886 } 887 return 0; 888 } 889 } 890 return -EINVAL; 891 default: 892 return -EINVAL; 893 } 894 } 895 896 static int 897 gk104_fifo_oneinit(struct nvkm_fifo *base) 898 { 899 struct gk104_fifo *fifo = gk104_fifo(base); 900 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 901 struct nvkm_device *device = subdev->device; 902 struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device); 903 int engn, runl, pbid, ret, i, j; 904 enum nvkm_devidx engidx; 905 u32 *map; 906 907 /* Determine number of PBDMAs by checking valid enable bits. */ 908 nvkm_wr32(device, 0x000204, 0xffffffff); 909 fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204)); 910 nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); 911 912 /* Read PBDMA->runlist(s) mapping from HW. */ 913 if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL))) 914 return -ENOMEM; 915 916 for (i = 0; i < fifo->pbdma_nr; i++) 917 map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04)); 918 919 /* Determine runlist configuration from topology device info. */ 920 i = 0; 921 while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) { 922 /* Determine which PBDMA handles requests for this engine. */ 923 for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { 924 if (map[j] & (1 << runl)) { 925 pbid = j; 926 break; 927 } 928 } 929 930 nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n", 931 engn, runl, pbid, nvkm_subdev_name[engidx]); 932 933 fifo->engine[engn].engine = nvkm_device_engine(device, engidx); 934 fifo->engine[engn].runl = runl; 935 fifo->engine[engn].pbid = pbid; 936 fifo->engine_nr = max(fifo->engine_nr, engn + 1); 937 fifo->runlist[runl].engm |= 1 << engn; 938 fifo->runlist_nr = max(fifo->runlist_nr, runl + 1); 939 } 940 941 kfree(map); 942 943 for (i = 0; i < fifo->runlist_nr; i++) { 944 for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) { 945 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 946 fifo->base.nr * 2/* TSG+chan */ * 947 fifo->func->runlist->size, 948 0x1000, false, 949 &fifo->runlist[i].mem[j]); 950 if (ret) 951 return ret; 952 } 953 954 init_waitqueue_head(&fifo->runlist[i].wait); 955 INIT_LIST_HEAD(&fifo->runlist[i].cgrp); 956 INIT_LIST_HEAD(&fifo->runlist[i].chan); 957 } 958 959 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 960 fifo->base.nr * 0x200, 0x1000, true, 961 &fifo->user.mem); 962 if (ret) 963 return ret; 964 965 ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem), 966 &fifo->user.bar); 967 if (ret) 968 return ret; 969 970 return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0); 971 } 972 973 static void 974 gk104_fifo_init(struct nvkm_fifo *base) 975 { 976 struct gk104_fifo *fifo = gk104_fifo(base); 977 struct nvkm_device *device = fifo->base.engine.subdev.device; 978 int i; 979 980 /* Enable PBDMAs. */ 981 nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1); 982 983 /* PBDMA[n] */ 984 for (i = 0; i < fifo->pbdma_nr; i++) { 985 nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); 986 nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ 987 nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ 988 } 989 990 /* PBDMA[n].HCE */ 991 for (i = 0; i < fifo->pbdma_nr; i++) { 992 nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ 993 nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ 994 } 995 996 nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12); 997 998 if (fifo->func->init_pbdma_timeout) 999 fifo->func->init_pbdma_timeout(fifo); 1000 1001 nvkm_wr32(device, 0x002100, 0xffffffff); 1002 nvkm_wr32(device, 0x002140, 0x7fffffff); 1003 } 1004 1005 static void * 1006 gk104_fifo_dtor(struct nvkm_fifo *base) 1007 { 1008 struct gk104_fifo *fifo = gk104_fifo(base); 1009 struct nvkm_device *device = fifo->base.engine.subdev.device; 1010 int i; 1011 1012 nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar); 1013 nvkm_memory_unref(&fifo->user.mem); 1014 1015 for (i = 0; i < fifo->runlist_nr; i++) { 1016 nvkm_memory_unref(&fifo->runlist[i].mem[1]); 1017 nvkm_memory_unref(&fifo->runlist[i].mem[0]); 1018 } 1019 1020 return fifo; 1021 } 1022 1023 static const struct nvkm_fifo_func 1024 gk104_fifo_ = { 1025 .dtor = gk104_fifo_dtor, 1026 .oneinit = gk104_fifo_oneinit, 1027 .info = gk104_fifo_info, 1028 .init = gk104_fifo_init, 1029 .fini = gk104_fifo_fini, 1030 .intr = gk104_fifo_intr, 1031 .fault = gk104_fifo_fault, 1032 .uevent_init = gk104_fifo_uevent_init, 1033 .uevent_fini = gk104_fifo_uevent_fini, 1034 .recover_chan = gk104_fifo_recover_chan, 1035 .class_get = gk104_fifo_class_get, 1036 .class_new = gk104_fifo_class_new, 1037 }; 1038 1039 int 1040 gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device, 1041 int index, int nr, struct nvkm_fifo **pfifo) 1042 { 1043 struct gk104_fifo *fifo; 1044 1045 if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) 1046 return -ENOMEM; 1047 fifo->func = func; 1048 INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work); 1049 *pfifo = &fifo->base; 1050 1051 return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base); 1052 } 1053 1054 const struct nvkm_enum 1055 gk104_fifo_fault_access[] = { 1056 { 0x0, "READ" }, 1057 { 0x1, "WRITE" }, 1058 {} 1059 }; 1060 1061 const struct nvkm_enum 1062 gk104_fifo_fault_engine[] = { 1063 { 0x00, "GR", NULL, NVKM_ENGINE_GR }, 1064 { 0x01, "DISPLAY" }, 1065 { 0x02, "CAPTURE" }, 1066 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, 1067 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, 1068 { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM }, 1069 { 0x06, "SCHED" }, 1070 { 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO }, 1071 { 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO }, 1072 { 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO }, 1073 { 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO }, 1074 { 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO }, 1075 { 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO }, 1076 { 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO }, 1077 { 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO }, 1078 { 0x0f, "HOSTSR" }, 1079 { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD }, 1080 { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, 1081 { 0x13, "PERF" }, 1082 { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, 1083 { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 }, 1084 { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 }, 1085 { 0x17, "PMU" }, 1086 { 0x18, "PTP" }, 1087 { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, 1088 { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 }, 1089 {} 1090 }; 1091 1092 const struct nvkm_enum 1093 gk104_fifo_fault_reason[] = { 1094 { 0x00, "PDE" }, 1095 { 0x01, "PDE_SIZE" }, 1096 { 0x02, "PTE" }, 1097 { 0x03, "VA_LIMIT_VIOLATION" }, 1098 { 0x04, "UNBOUND_INST_BLOCK" }, 1099 { 0x05, "PRIV_VIOLATION" }, 1100 { 0x06, "RO_VIOLATION" }, 1101 { 0x07, "WO_VIOLATION" }, 1102 { 0x08, "PITCH_MASK_VIOLATION" }, 1103 { 0x09, "WORK_CREATION" }, 1104 { 0x0a, "UNSUPPORTED_APERTURE" }, 1105 { 0x0b, "COMPRESSION_FAILURE" }, 1106 { 0x0c, "UNSUPPORTED_KIND" }, 1107 { 0x0d, "REGION_VIOLATION" }, 1108 { 0x0e, "BOTH_PTES_VALID" }, 1109 { 0x0f, "INFO_TYPE_POISONED" }, 1110 {} 1111 }; 1112 1113 const struct nvkm_enum 1114 gk104_fifo_fault_hubclient[] = { 1115 { 0x00, "VIP" }, 1116 { 0x01, "CE0" }, 1117 { 0x02, "CE1" }, 1118 { 0x03, "DNISO" }, 1119 { 0x04, "FE" }, 1120 { 0x05, "FECS" }, 1121 { 0x06, "HOST" }, 1122 { 0x07, "HOST_CPU" }, 1123 { 0x08, "HOST_CPU_NB" }, 1124 { 0x09, "ISO" }, 1125 { 0x0a, "MMU" }, 1126 { 0x0b, "MSPDEC" }, 1127 { 0x0c, "MSPPP" }, 1128 { 0x0d, "MSVLD" }, 1129 { 0x0e, "NISO" }, 1130 { 0x0f, "P2P" }, 1131 { 0x10, "PD" }, 1132 { 0x11, "PERF" }, 1133 { 0x12, "PMU" }, 1134 { 0x13, "RASTERTWOD" }, 1135 { 0x14, "SCC" }, 1136 { 0x15, "SCC_NB" }, 1137 { 0x16, "SEC" }, 1138 { 0x17, "SSYNC" }, 1139 { 0x18, "GR_CE" }, 1140 { 0x19, "CE2" }, 1141 { 0x1a, "XV" }, 1142 { 0x1b, "MMU_NB" }, 1143 { 0x1c, "MSENC" }, 1144 { 0x1d, "DFALCON" }, 1145 { 0x1e, "SKED" }, 1146 { 0x1f, "AFALCON" }, 1147 {} 1148 }; 1149 1150 const struct nvkm_enum 1151 gk104_fifo_fault_gpcclient[] = { 1152 { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" }, 1153 { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" }, 1154 { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" }, 1155 { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" }, 1156 { 0x0c, "RAST" }, 1157 { 0x0d, "GCC" }, 1158 { 0x0e, "GPCCS" }, 1159 { 0x0f, "PROP_0" }, 1160 { 0x10, "PROP_1" }, 1161 { 0x11, "PROP_2" }, 1162 { 0x12, "PROP_3" }, 1163 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 1164 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 1165 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 1166 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 1167 { 0x1f, "GPM" }, 1168 { 0x20, "LTP_UTLB_0" }, 1169 { 0x21, "LTP_UTLB_1" }, 1170 { 0x22, "LTP_UTLB_2" }, 1171 { 0x23, "LTP_UTLB_3" }, 1172 { 0x24, "GPC_RGG_UTLB" }, 1173 {} 1174 }; 1175 1176 static const struct gk104_fifo_func 1177 gk104_fifo = { 1178 .fault.access = gk104_fifo_fault_access, 1179 .fault.engine = gk104_fifo_fault_engine, 1180 .fault.reason = gk104_fifo_fault_reason, 1181 .fault.hubclient = gk104_fifo_fault_hubclient, 1182 .fault.gpcclient = gk104_fifo_fault_gpcclient, 1183 .runlist = &gk104_fifo_runlist, 1184 .chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new }, 1185 }; 1186 1187 int 1188 gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) 1189 { 1190 return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo); 1191 } 1192