/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>

#include <nvif/class.h>

void
gk104_chan_stop(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
}

void
gk104_chan_start(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
}

void
gk104_chan_unbind(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000);
}

void
gk104_chan_bind_inst(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12);
}

void
gk104_chan_bind(struct nvkm_chan *chan)
{
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_device *device = runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16);
	gk104_chan_bind_inst(chan);
}

static const struct nvkm_chan_func
gk104_chan = {
	.bind = gk104_chan_bind,
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
	.preempt = gf100_chan_preempt,
};

void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy = !!(stat & 0x80000000);
	status->faulted = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id = (stat & 0x0fff0000) >> 16;
	status->chsw = !!(stat & 0x00008000);
	status->save = !!(stat & 0x00004000);
	status->load = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id = (stat & 0x00000fff);
	status->chan = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

const struct nvkm_engn_func
gk104_engn = {
};

const struct nvkm_engn_func
gk104_engn_ce = {
};

static const struct nvkm_bitfield
gk104_runq_intr_1_names[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static bool
gk104_runq_intr_1(struct nvkm_runq *runq)
{
	struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   runq->id, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
	return true;
}

const struct nvkm_bitfield
gk104_runq_intr_0_names[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

bool
gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
	bool intr0 = gf100_runq_intr(runq, NULL);
	bool intr1 = gk104_runq_intr_1(runq);

	return intr0 || intr1;
}

void
gk104_runq_init(struct nvkm_runq *runq)
{
	struct nvkm_device *device = runq->fifo->engine.subdev.device;
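
	/* Base PBDMA setup is shared with gf100; the writes below then clear
	 * and unmask this unit's HCE interrupt set (serviced by
	 * gk104_runq_intr_1()).
	 */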
	gf100_runq_init(runq);

	nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
	nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */
}

static u32
gk104_runq_runm(struct nvkm_runq *runq)
{
	return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));
}

const struct nvkm_runq_func
gk104_runq = {
	.init = gk104_runq_init,
	.intr = gk104_runq_intr,
	.intr_0_names = gk104_runq_intr_0_names,
};

void
gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
}

void
gk104_runl_block(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
}

bool
gk104_runl_pending(struct nvkm_runl *runl)
{
	struct nvkm_device *device = runl->fifo->engine.subdev.device;

	return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;
}

void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_runl *rl = nvkm_runl_get(&fifo->base, runl, 0);
	int target;

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	rl->func->wait(rl);
}

void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;

	mutex_lock(&fifo->base.mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	func->commit(fifo, runl, mem, nr);
	mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};

static const struct nvkm_runl_func
gk104_runl = {
	.wait = nv50_runl_wait,
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
	.preempt_pending = gf100_runl_preempt_pending,
};

int
gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int engn;

	if (engine->subdev.type == NVKM_ENGINE_SW)
		return GK104_FIFO_ENGN_SW;

	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine)
			return engn;
	}

	WARN_ON(1);
	return -1;
}

static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_update(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}

void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32 runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_chan_error(&chan->base, false);
	}

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->mmu_fault->engine;

			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.type &&
				    en->inst == engine->subdev.inst) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger a MMU fault for the engine.
	 *
	 * No good idea why this is needed, but nvgpu does something similar,
	 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gk104_fifo_mmu_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0" },
	{ 0x08, "HOST1" },
	{ 0x09, "HOST2" },
	{ 0x0a, "HOST3" },
	{ 0x0b, "HOST4" },
	{ 0x0c, "HOST5" },
	{ 0x0d, "HOST6" },
	{ 0x0e, "HOST7" },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
	{}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

const struct nvkm_fifo_func_mmu_fault
gk104_fifo_mmu_fault = {
	.recover = gf100_fifo_mmu_fault_recover,
	.access = gf100_fifo_mmu_fault_access,
	.engine = gk104_fifo_mmu_fault_engine,
	.reason = gk104_fifo_mmu_fault_reason,
	.hubclient = gk104_fifo_mmu_fault_hubclient,
	.gpcclient = gk104_fifo_mmu_fault_gpcclient,
};

static const struct nvkm_enum
gk104_fifo_intr_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

void
gk104_fifo_intr_bind(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	u32 intr = nvkm_rd32(subdev->device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

void
gk104_fifo_intr_chsw(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);

	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	u32 stat = nvkm_rd32(subdev->device, 0x00259c);

	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

void
gk104_fifo_intr_runlist(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_runl *runl;
	u32 mask = nvkm_rd32(device, 0x002a00);

	nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) {
		nvkm_wr32(device, 0x002a00, BIT(runl->id));
	}
}

irqreturn_t
gk104_fifo_intr(struct nvkm_inth *inth)
{
	struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(gk104_fifo(fifo));
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		gf100_fifo_intr_mmu_fault(fifo);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		if (gf100_fifo_intr_pbdma(fifo))
			stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		spin_lock(&fifo->lock);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		spin_unlock(&fifo->lock);
		nvkm_wr32(device, 0x002100, stat);
	}

	return IRQ_HANDLED;
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);

	flush_work(&fifo->recover.work);
}

void
gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
	struct nvkm_device *device = fifo->engine.subdev.device;

	nvkm_wr32(device, 0x000204, mask);
	nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}

void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

int
gk104_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_top_device *tdev;
	struct nvkm_runl *runl;
	struct nvkm_runq *runq;
	const struct nvkm_engn_func *func;

	nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
		runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
		if (!runl) {
			runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
			if (IS_ERR(runl))
				return PTR_ERR(runl);

			nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
				if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
					return -ENOMEM;

				runl->runq[runl->runq_nr++] = runq;
			}
		}

		if (tdev->engine < 0)
			continue;

		switch (tdev->type) {
		case NVKM_ENGINE_CE:
			func = fifo->func->engn_ce;
			break;
		case NVKM_ENGINE_GR:
			nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
			fallthrough;
		default:
			func = fifo->func->engn;
			break;
		}

		nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
	}

	return 0;
}

int
gk104_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 4096;
}

int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	struct nvkm_top_device *tdev;
	int ret, i, j;

	/* Determine runlist configuration from topology device info. */
	list_for_each_entry(tdev, &device->top->device, head) {
		const int engn = tdev->engine;

		if (engn < 0)
			continue;

		fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
		fifo->engine[engn].runl = tdev->runlist;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[tdev->runlist].engm |= BIT(engn);
		fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
		if (tdev->type == NVKM_ENGINE_GR)
			fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
		fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
	}

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.chid_nr = gk104_fifo_chid_nr,
	.chid_ctor = gf100_fifo_chid_ctor,
	.runq_nr = gf100_fifo_runq_nr,
	.runl_ctor = gk104_fifo_runl_ctor,
	.init = gk104_fifo_init,
	.init_pbdmas = gk104_fifo_init_pbdmas,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
	.mmu_fault = &gk104_fifo_mmu_fault,
	.engine_id = gk104_fifo_engine_id,
	.recover_chan = gk104_fifo_recover_chan,
	.runlist = &gk104_fifo_runlist,
	.nonstall = &gf100_fifo_nonstall,
	.runl = &gk104_runl,
	.runq = &gk104_runq,
	.engn = &gk104_engn,
	.engn_ce = &gk104_engn_ce,
	.cgrp = {{ }, &nv04_cgrp },
	.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan, .ctor = &gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, type, inst, 0, pfifo);
}