/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>

#include <nvif/class.h>

static const struct nvkm_chan_func
gk104_chan = {
};

/* Decode the ENGINE_STATUS register (0x002640 + engn * 8) into a SW view of
 * the engine: whether it is busy or faulted, whether a channel switch is in
 * progress, and which channel/TSG is (or is about to become) resident.
 */
void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
                         struct gk104_fifo_engine_status *status)
{
        struct nvkm_engine *engine = fifo->engine[engn].engine;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

        status->busy     = !!(stat & 0x80000000);
        status->faulted  = !!(stat & 0x40000000);
        status->next.tsg = !!(stat & 0x10000000);
        status->next.id  =   (stat & 0x0fff0000) >> 16;
        status->chsw     = !!(stat & 0x00008000);
        status->save     = !!(stat & 0x00004000);
        status->load     = !!(stat & 0x00002000);
        status->prev.tsg = !!(stat & 0x00001000);
        status->prev.id  =   (stat & 0x00000fff);
        status->chan     = NULL;

        if (status->busy && status->chsw) {
                if (status->load && status->save) {
                        if (engine && nvkm_engine_chsw_load(engine))
                                status->chan = &status->next;
                        else
                                status->chan = &status->prev;
                } else
                if (status->load) {
                        status->chan = &status->next;
                } else {
                        status->chan = &status->prev;
                }
        } else
        if (status->load) {
                status->chan = &status->prev;
        }

        nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
                           "save %d load %d %sid %d%s-> %sid %d%s\n",
                   engn, status->busy, status->faulted,
                   status->chsw, status->save, status->load,
                   status->prev.tsg ? "tsg" : "ch", status->prev.id,
                   status->chan == &status->prev ? "*" : " ",
                   status->next.tsg ? "tsg" : "ch", status->next.id,
                   status->chan == &status->next ? "*" : " ");
}

const struct nvkm_engn_func
gk104_engn = {
};

const struct nvkm_engn_func
gk104_engn_ce = {
};

static const struct nvkm_bitfield
gk104_runq_intr_1_names[] = {
        { 0x00000001, "HCE_RE_ILLEGAL_OP" },
        { 0x00000002, "HCE_RE_ALIGNB" },
        { 0x00000004, "HCE_PRIV" },
        { 0x00000008, "HCE_ILLEGAL_MTHD" },
        { 0x00000010, "HCE_ILLEGAL_CLASS" },
        {}
};

static bool
gk104_runq_intr_1(struct nvkm_runq *runq)
{
        struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
        u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;
        char msg[128];

        if (stat) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
                           runq->id, stat, msg, chid,
                           nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
                           nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
        }

        nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
        return true;
}

const struct nvkm_bitfield
gk104_runq_intr_0_names[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00000002, "MEMACK_TIMEOUT" },
        { 0x00000004, "MEMACK_EXTRA" },
        { 0x00000008, "MEMDAT_TIMEOUT" },
        { 0x00000010, "MEMDAT_EXTRA" },
        { 0x00000020, "MEMFLUSH" },
        { 0x00000040, "MEMOP" },
        { 0x00000080, "LBCONNECT" },
        { 0x00000100, "LBREQ" },
        { 0x00000200, "LBACK_TIMEOUT" },
        { 0x00000400, "LBACK_EXTRA" },
        { 0x00000800, "LBDAT_TIMEOUT" },
        { 0x00001000, "LBDAT_EXTRA" },
        { 0x00002000, "GPFIFO" },
        { 0x00004000, "GPPTR" },
        { 0x00008000, "GPENTRY" },
        { 0x00010000, "GPCRC" },
        { 0x00020000, "PBPTR" },
        { 0x00040000, "PBENTRY" },
        { 0x00080000, "PBCRC" },
        { 0x00100000, "XBARCONNECT" },
        { 0x00200000, "METHOD" },
        { 0x00400000, "METHODCRC" },
        { 0x00800000, "DEVICE" },
        { 0x02000000, "SEMAPHORE" },
        { 0x04000000, "ACQUIRE" },
        { 0x08000000, "PRI" },
        { 0x20000000, "NO_CTXSW_SEG" },
        { 0x40000000, "PBSEG" },
        { 0x80000000, "SIGNATURE" },
        {}
};

bool
gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
        bool intr0 = gf100_runq_intr(runq, NULL);
        bool intr1 = gk104_runq_intr_1(runq);

        return intr0 || intr1;
}

void
gk104_runq_init(struct nvkm_runq *runq)
{
        struct nvkm_device *device = runq->fifo->engine.subdev.device;

        gf100_runq_init(runq);

        nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
        nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */
}

static u32
gk104_runq_runm(struct nvkm_runq *runq)
{
        return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));
}

const struct nvkm_runq_func
gk104_runq = {
        .init = gk104_runq_init,
        .intr = gk104_runq_intr,
        .intr_0_names = gk104_runq_intr_0_names,
};

/* Submit a runlist to HW: write its base address/aperture and entry count to
 * 0x002270/0x002274, then poll 0x002284 until the update is no longer pending.
 */
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
                          struct nvkm_memory *mem, int nr)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int target;

        switch (nvkm_memory_target(mem)) {
        case NVKM_MEM_TARGET_VRAM: target = 0; break;
        case NVKM_MEM_TARGET_NCOH: target = 3; break;
        default:
                WARN_ON(1);
                return;
        }

        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, (runl << 20) | nr);

        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
                        break;
        ) < 0)
                nvkm_error(subdev, "runlist %d update timeout\n", runl);
}

void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
        const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
        struct gk104_fifo_chan *chan;
        struct nvkm_memory *mem;
        struct nvkm_fifo_cgrp *cgrp;
        int nr = 0;

        mutex_lock(&fifo->base.mutex);
        mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
        fifo->runlist[runl].next = !fifo->runlist[runl].next;

        nvkm_kmap(mem);
        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                func->chan(chan, mem, nr++ * func->size);
        }

        list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
                func->cgrp(cgrp, mem, nr++ * func->size);
                list_for_each_entry(chan, &cgrp->chan, head) {
                        func->chan(chan, mem, nr++ * func->size);
                }
        }
        nvkm_done(mem);

        func->commit(fifo, runl, mem, nr);
        mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
        mutex_lock(&fifo->base.mutex);
        if (!list_empty(&chan->head)) {
                list_del_init(&chan->head);
                if (cgrp && !--cgrp->chan_nr)
                        list_del_init(&cgrp->head);
        }
        mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
        mutex_lock(&fifo->base.mutex);
        if (cgrp) {
                if (!cgrp->chan_nr++)
                        list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
                list_add_tail(&chan->head, &cgrp->chan);
        } else {
                list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
        }
        mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
                        struct nvkm_memory *memory, u32 offset)
{
        nvkm_wo32(memory, offset + 0, chan->base.chid);
        nvkm_wo32(memory, offset + 4, 0x00000000);
}

const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
        .size = 8,
        .chan = gk104_fifo_runlist_chan,
        .commit = gk104_fifo_runlist_commit,
};

static const struct nvkm_runl_func
gk104_runl = {
};

int
gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        int engn;

        if (engine->subdev.type == NVKM_ENGINE_SW)
                return GK104_FIFO_ENGN_SW;

        for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
                if (fifo->engine[engn].engine == engine)
                        return engn;
        }

        WARN_ON(1);
        return -1;
}
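
/* Deferred recovery work: reset (fini/init) every engine marked in
 * recover.engm, resubmit every runlist marked in recover.runm, then
 * unblock the affected runlists again.
 */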
static void
gk104_fifo_recover_work(struct work_struct *w)
{
        struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engm, runm, todo;
        int engn, runl;

        spin_lock_irqsave(&fifo->base.lock, flags);
        runm = fifo->recover.runm;
        engm = fifo->recover.engm;
        fifo->recover.engm = 0;
        fifo->recover.runm = 0;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        nvkm_mask(device, 0x002630, runm, runm);

        for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
                if ((engine = fifo->engine[engn].engine)) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
                gk104_fifo_runlist_update(fifo, runl);

        nvkm_wr32(device, 0x00262c, runm);
        nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32 runm = BIT(runl);

        assert_spin_locked(&fifo->base.lock);
        if (fifo->recover.runm & runm)
                return;
        fifo->recover.runm |= runm;

        /* Block runlist to prevent channel assignment(s) from changing. */
        nvkm_mask(device, 0x002630, runm, runm);

        /* Schedule recovery. */
        nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
        schedule_work(&fifo->recover.work);
}

static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
        struct gk104_fifo_chan *chan;
        struct nvkm_fifo_cgrp *cgrp;

        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                if (chan->base.chid == chid) {
                        list_del_init(&chan->head);
                        return chan;
                }
        }

        list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
                if (cgrp->id == chid) {
                        chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
                        list_del_init(&chan->head);
                        if (!--cgrp->chan_nr)
                                list_del_init(&cgrp->head);
                        return chan;
                }
        }

        return NULL;
}
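
/* Kill a channel: mark its SW state as dead, disable it in HW, then block
 * its runlist and schedule recovery of any engine the channel is resident on.
 */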
void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
        const u32  runl = (stat & 0x000f0000) >> 16;
        const bool used = (stat & 0x00000001);
        unsigned long engn, engm = fifo->runlist[runl].engm;
        struct gk104_fifo_chan *chan;

        assert_spin_locked(&fifo->base.lock);
        if (!used)
                return;

        /* Lookup SW state for channel, and mark it as dead. */
        chan = gk104_fifo_recover_chid(fifo, runl, chid);
        if (chan) {
                chan->killed = true;
                nvkm_fifo_kevent(&fifo->base, chid);
        }

        /* Disable channel. */
        nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
        nvkm_warn(subdev, "channel %d: killed\n", chid);

        /* Block channel assignments from changing during recovery. */
        gk104_fifo_recover_runl(fifo, runl);

        /* Schedule recovery for any engines the channel is on. */
        for_each_set_bit(engn, &engm, fifo->engine_nr) {
                struct gk104_fifo_engine_status status;
                gk104_fifo_engine_status(fifo, engn, &status);
                if (!status.chan || status.chan->id != chid)
                        continue;
                gk104_fifo_recover_engn(fifo, engn);
        }
}
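
/* Schedule recovery of a single engine.  Any channel currently on the engine
 * is killed first, and an MMU fault is forced where possible, which (per the
 * comment below) makes recovery from CTXSW_TIMEOUT much more reliable.
 */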
static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
        struct nvkm_engine *engine = fifo->engine[engn].engine;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        const u32 runl = fifo->engine[engn].runl;
        const u32 engm = BIT(engn);
        struct gk104_fifo_engine_status status;
        int mmui = -1;

        assert_spin_locked(&fifo->base.lock);
        if (fifo->recover.engm & engm)
                return;
        fifo->recover.engm |= engm;

        /* Block channel assignments from changing during recovery. */
        gk104_fifo_recover_runl(fifo, runl);

        /* Determine which channel (if any) is currently on the engine. */
        gk104_fifo_engine_status(fifo, engn, &status);
        if (status.chan) {
                /* The channel is no longer viable, kill it. */
                gk104_fifo_recover_chan(&fifo->base, status.chan->id);
        }

        /* Determine MMU fault ID for the engine, if we're not being
         * called from the fault handler already.
         */
        if (!status.faulted && engine) {
                mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
                if (mmui < 0) {
                        const struct nvkm_enum *en = fifo->func->mmu_fault->engine;
                        for (; en && en->name; en++) {
                                if (en->data2 == engine->subdev.type &&
                                    en->inst == engine->subdev.inst) {
                                        mmui = en->value;
                                        break;
                                }
                        }
                }
                WARN_ON(mmui < 0);
        }

        /* Trigger a MMU fault for the engine.
         *
         * No good idea why this is needed, but nvgpu does something similar,
         * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
         */
        if (mmui >= 0) {
                nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

                /* Wait for fault to trigger. */
                nvkm_msec(device, 2000,
                        gk104_fifo_engine_status(fifo, engn, &status);
                        if (status.faulted)
                                break;
                );

                /* Release MMU fault trigger, and ACK the fault. */
                nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
                nvkm_wr32(device, 0x00259c, BIT(mmui));
                nvkm_wr32(device, 0x002100, 0x10000000);
        }

        /* Schedule recovery. */
        nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
        schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gk104_fifo_mmu_fault_engine[] = {
        { 0x00, "GR", NULL, NVKM_ENGINE_GR },
        { 0x01, "DISPLAY" },
        { 0x02, "CAPTURE" },
        { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x06, "SCHED" },
        { 0x07, "HOST0" },
        { 0x08, "HOST1" },
        { 0x09, "HOST2" },
        { 0x0a, "HOST3" },
        { 0x0b, "HOST4" },
        { 0x0c, "HOST5" },
        { 0x0d, "HOST6" },
        { 0x0e, "HOST7" },
        { 0x0f, "HOSTSR" },
        { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x13, "PERF" },
        { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
        { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
        { 0x17, "PMU" },
        { 0x18, "PTP" },
        { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
        { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
        {}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_reason[] = {
        { 0x00, "PDE" },
        { 0x01, "PDE_SIZE" },
        { 0x02, "PTE" },
        { 0x03, "VA_LIMIT_VIOLATION" },
        { 0x04, "UNBOUND_INST_BLOCK" },
        { 0x05, "PRIV_VIOLATION" },
        { 0x06, "RO_VIOLATION" },
        { 0x07, "WO_VIOLATION" },
        { 0x08, "PITCH_MASK_VIOLATION" },
        { 0x09, "WORK_CREATION" },
        { 0x0a, "UNSUPPORTED_APERTURE" },
        { 0x0b, "COMPRESSION_FAILURE" },
        { 0x0c, "UNSUPPORTED_KIND" },
        { 0x0d, "REGION_VIOLATION" },
        { 0x0e, "BOTH_PTES_VALID" },
        { 0x0f, "INFO_TYPE_POISONED" },
        {}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_hubclient[] = {
        { 0x00, "VIP" },
        { 0x01, "CE0" },
        { 0x02, "CE1" },
        { 0x03, "DNISO" },
        { 0x04, "FE" },
        { 0x05, "FECS" },
        { 0x06, "HOST" },
        { 0x07, "HOST_CPU" },
        { 0x08, "HOST_CPU_NB" },
        { 0x09, "ISO" },
        { 0x0a, "MMU" },
        { 0x0b, "MSPDEC" },
        { 0x0c, "MSPPP" },
        { 0x0d, "MSVLD" },
        { 0x0e, "NISO" },
        { 0x0f, "P2P" },
        { 0x10, "PD" },
        { 0x11, "PERF" },
        { 0x12, "PMU" },
        { 0x13, "RASTERTWOD" },
        { 0x14, "SCC" },
        { 0x15, "SCC_NB" },
        { 0x16, "SEC" },
        { 0x17, "SSYNC" },
        { 0x18, "GR_CE" },
        { 0x19, "CE2" },
        { 0x1a, "XV" },
        { 0x1b, "MMU_NB" },
        { 0x1c, "MSENC" },
        { 0x1d, "DFALCON" },
        { 0x1e, "SKED" },
        { 0x1f, "AFALCON" },
        {}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_gpcclient[] = {
        { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
        { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
        { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
        { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
        { 0x0c, "RAST" },
        { 0x0d, "GCC" },
        { 0x0e, "GPCCS" },
        { 0x0f, "PROP_0" },
        { 0x10, "PROP_1" },
        { 0x11, "PROP_2" },
        { 0x12, "PROP_3" },
        { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
        { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
        { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
        { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
        { 0x1f, "GPM" },
        { 0x20, "LTP_UTLB_0" },
        { 0x21, "LTP_UTLB_1" },
        { 0x22, "LTP_UTLB_2" },
        { 0x23, "LTP_UTLB_3" },
        { 0x24, "GPC_RGG_UTLB" },
        {}
};

const struct nvkm_fifo_func_mmu_fault
gk104_fifo_mmu_fault = {
        .recover = gf100_fifo_mmu_fault_recover,
        .access = gf100_fifo_mmu_fault_access,
        .engine = gk104_fifo_mmu_fault_engine,
        .reason = gk104_fifo_mmu_fault_reason,
        .hubclient = gk104_fifo_mmu_fault_hubclient,
        .gpcclient = gk104_fifo_mmu_fault_gpcclient,
};

static const struct nvkm_enum
gk104_fifo_intr_bind_reason[] = {
        { 0x01, "BIND_NOT_UNBOUND" },
        { 0x02, "SNOOP_WITHOUT_BAR1" },
        { 0x03, "UNBIND_WHILE_RUNNING" },
        { 0x05, "INVALID_RUNLIST" },
        { 0x06, "INVALID_CTX_TGT" },
        { 0x0b, "UNBIND_WHILE_PARKED" },
        {}
};

void
gk104_fifo_intr_bind(struct nvkm_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->engine.subdev;
        u32 intr = nvkm_rd32(subdev->device, 0x00252c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code);

        nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};
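
/* CTXSW_TIMEOUT: scan all engines and schedule recovery for any that are
 * stuck mid channel-switch.  SCHED_ERROR is ACKed and masked around the scan,
 * as MMU faults cannot be triggered while it is pending.
 */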
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        unsigned long flags, engm = 0;
        u32 engn;

        /* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
         * as MMU_FAULT cannot be triggered while it's pending.
         */
        spin_lock_irqsave(&fifo->base.lock, flags);
        nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
        nvkm_wr32(device, 0x002100, 0x00000100);

        for (engn = 0; engn < fifo->engine_nr; engn++) {
                struct gk104_fifo_engine_status status;

                gk104_fifo_engine_status(fifo, engn, &status);
                if (!status.busy || !status.chsw)
                        continue;

                engm |= BIT(engn);
        }

        for_each_set_bit(engn, &engm, fifo->engine_nr)
                gk104_fifo_recover_engn(fifo, engn);

        nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gk104_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}

void
gk104_fifo_intr_chsw(struct nvkm_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00256c);

        nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
        nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->engine.subdev;
        u32 stat = nvkm_rd32(subdev->device, 0x00259c);

        nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x002a00);
        while (mask) {
                int runl = __ffs(mask);
                wake_up(&fifo->runlist[runl].wait);
                nvkm_wr32(device, 0x002a00, 1 << runl);
                mask &= ~(1 << runl);
        }
}
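
/* Top-level PFIFO interrupt handler: dispatch each status bit in 0x002100
 * (filtered by the enable mask in 0x002140) to its handler, and mask off any
 * interrupt that is left unhandled.
 */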
irqreturn_t
gk104_fifo_intr(struct nvkm_inth *inth)
{
        struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
        struct nvkm_subdev *subdev = &fifo->engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                gk104_fifo_intr_bind(fifo);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000010) {
                nvkm_error(subdev, "PIO_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x00000010);
                stat &= ~0x00000010;
        }

        if (stat & 0x00000100) {
                gk104_fifo_intr_sched(gk104_fifo(fifo));
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                gk104_fifo_intr_chsw(fifo);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x00800000) {
                nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
                nvkm_wr32(device, 0x002100, 0x00800000);
                stat &= ~0x00800000;
        }

        if (stat & 0x01000000) {
                nvkm_error(subdev, "LB_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x08000000) {
                gk104_fifo_intr_dropped_fault(fifo);
                nvkm_wr32(device, 0x002100, 0x08000000);
                stat &= ~0x08000000;
        }

        if (stat & 0x10000000) {
                gf100_fifo_intr_mmu_fault(fifo);
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                if (gf100_fifo_intr_pbdma(fifo))
                        stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gk104_fifo_intr_runlist(gk104_fifo(fifo));
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                nvkm_wr32(device, 0x002100, 0x80000000);
                nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                spin_lock(&fifo->lock);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                spin_unlock(&fifo->lock);
                nvkm_wr32(device, 0x002100, stat);
        }

        return IRQ_HANDLED;
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        flush_work(&fifo->recover.work);
}

void
gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
        struct nvkm_device *device = fifo->engine.subdev.device;

        nvkm_wr32(device, 0x000204, mask);
        nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}

void
gk104_fifo_init(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;

        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
}
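
/* Construct SW runlist/engine objects from the PTOP device list, attaching
 * each PBDMA (runq) to the runlist(s) it serves as reported by 0x002390.
 */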
int
gk104_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        struct nvkm_top_device *tdev;
        struct nvkm_runl *runl;
        struct nvkm_runq *runq;
        const struct nvkm_engn_func *func;

        nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
                runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
                if (!runl) {
                        runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
                        if (IS_ERR(runl))
                                return PTR_ERR(runl);

                        nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
                                if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
                                        return -ENOMEM;

                                runl->runq[runl->runq_nr++] = runq;
                        }

                }

                if (tdev->engine < 0)
                        continue;

                switch (tdev->type) {
                case NVKM_ENGINE_CE:
                        func = fifo->func->engn_ce;
                        break;
                case NVKM_ENGINE_GR:
                        nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
                        fallthrough;
                default:
                        func = fifo->func->engn;
                        break;
                }

                nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
        }

        return 0;
}

int
gk104_fifo_chid_nr(struct nvkm_fifo *fifo)
{
        return 4096;
}
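
/* One-time setup: record the engine/runlist topology from PTOP, allocate the
 * double-buffered runlist memory for each runlist, and allocate and map the
 * per-channel user area through BAR1.
 */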
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
        struct nvkm_top_device *tdev;
        int ret, i, j;

        /* Determine runlist configuration from topology device info. */
        list_for_each_entry(tdev, &device->top->device, head) {
                const int engn = tdev->engine;

                if (engn < 0)
                        continue;

                fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
                fifo->engine[engn].runl = tdev->runlist;
                fifo->engine_nr = max(fifo->engine_nr, engn + 1);
                fifo->runlist[tdev->runlist].engm |= BIT(engn);
                fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
                if (tdev->type == NVKM_ENGINE_GR)
                        fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
                fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
        }

        for (i = 0; i < fifo->runlist_nr; i++) {
                for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
                        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                              fifo->base.nr * 2/* TSG+chan */ *
                                              fifo->func->runlist->size,
                                              0x1000, false,
                                              &fifo->runlist[i].mem[j]);
                        if (ret)
                                return ret;
                }

                init_waitqueue_head(&fifo->runlist[i].wait);
                INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
                INIT_LIST_HEAD(&fifo->runlist[i].chan);
        }

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              fifo->base.nr * 0x200, 0x1000, true,
                              &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
                           &fifo->user.bar);
        if (ret)
                return ret;

        return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
        nvkm_memory_unref(&fifo->user.mem);

        for (i = 0; i < fifo->runlist_nr; i++) {
                nvkm_memory_unref(&fifo->runlist[i].mem[1]);
                nvkm_memory_unref(&fifo->runlist[i].mem[0]);
        }

        return fifo;
}

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
                enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
        struct gk104_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        fifo->func = func;
        INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo = {
        .dtor = gk104_fifo_dtor,
        .oneinit = gk104_fifo_oneinit,
        .chid_nr = gk104_fifo_chid_nr,
        .chid_ctor = gf100_fifo_chid_ctor,
        .runq_nr = gf100_fifo_runq_nr,
        .runl_ctor = gk104_fifo_runl_ctor,
        .init = gk104_fifo_init,
        .init_pbdmas = gk104_fifo_init_pbdmas,
        .fini = gk104_fifo_fini,
        .intr = gk104_fifo_intr,
        .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
        .mmu_fault = &gk104_fifo_mmu_fault,
        .engine_id = gk104_fifo_engine_id,
        .recover_chan = gk104_fifo_recover_chan,
        .runlist = &gk104_fifo_runlist,
        .nonstall = &gf100_fifo_nonstall,
        .runl = &gk104_runl,
        .runq = &gk104_runq,
        .engn = &gk104_engn,
        .engn_ce = &gk104_engn_ce,
        .cgrp = {{ }, &nv04_cgrp },
        .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan, .ctor = &gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
{
        return gk104_fifo_new_(&gk104_fifo, device, type, inst, 0, pfifo);
}