/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>

#include <nvif/class.h>

void
gk104_chan_stop(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
}

void
gk104_chan_start(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
}

void
gk104_chan_unbind(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000);
}

void
gk104_chan_bind_inst(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12);
}

void
gk104_chan_bind(struct nvkm_chan *chan)
{
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_device *device = runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16);
	gk104_chan_bind_inst(chan);
}

static const struct nvkm_chan_func
gk104_chan = {
	.bind = gk104_chan_bind,
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
	.preempt = gf100_chan_preempt,
};

void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

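	/* Decode the engine status word: busy/faulted state, whether a
	 * channel switch is in progress (save/load), and the previous/next
	 * channel or TSG IDs involved in that switch.
	 */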
	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  = (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  = (stat & 0x00000fff);
	status->chan = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

const struct nvkm_engn_func
gk104_engn = {
};

const struct nvkm_engn_func
gk104_engn_ce = {
};

static const struct nvkm_bitfield
gk104_runq_intr_1_names[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static bool
gk104_runq_intr_1(struct nvkm_runq *runq)
{
	struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;
	char msg[128];

	if (stat & 0x80000000) {
		if (runq->func->intr_1_ctxnotvalid &&
		    runq->func->intr_1_ctxnotvalid(runq, chid))
			stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   runq->id, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
	return true;
}

const struct nvkm_bitfield
gk104_runq_intr_0_names[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

bool
gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
	bool intr0 = gf100_runq_intr(runq, NULL);
	bool intr1 = gk104_runq_intr_1(runq);

	return intr0 || intr1;
}

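/* In addition to the gf100 setup, ack any pending HCE interrupt status and
 * enable all HCE interrupts for this PBDMA (see gk104_runq_intr_1()).
 */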
void
gk104_runq_init(struct nvkm_runq *runq)
{
	struct nvkm_device *device = runq->fifo->engine.subdev.device;

	gf100_runq_init(runq);

	nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
	nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */
}

static u32
gk104_runq_runm(struct nvkm_runq *runq)
{
	return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));
}

const struct nvkm_runq_func
gk104_runq = {
	.init = gk104_runq_init,
	.intr = gk104_runq_intr,
	.intr_0_names = gk104_runq_intr_0_names,
};

void
gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
}

void
gk104_runl_block(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
}

bool
gk104_runl_pending(struct nvkm_runl *runl)
{
	struct nvkm_device *device = runl->fifo->engine.subdev.device;

	return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;
}

void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_runl *rl = nvkm_runl_get(&fifo->base, runl, 0);
	int target;

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	rl->func->wait(rl);
}

void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;

	mutex_lock(&fifo->base.mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	func->commit(fifo, runl, mem, nr);
	mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.mutex);
}

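/* Each runlist entry written here is 8 bytes: the channel ID in the first
 * word and zero in the second for a bare (non-TSG) channel.
 */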
void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};

static const struct nvkm_runl_func
gk104_runl = {
	.wait = nv50_runl_wait,
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
	.preempt_pending = gf100_runl_preempt_pending,
};

int
gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int engn;

	if (engine->subdev.type == NVKM_ENGINE_SW)
		return GK104_FIFO_ENGN_SW;

	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine)
			return engn;
	}

	WARN_ON(1);
	return -1;
}

static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_update(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

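/* Look up the channel with the given ID on a runlist, either directly or
 * inside a channel group, and unlink it from the software runlist state.
 */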
static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}

void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32 runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Look up SW state for the channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_chan_error(&chan->base, false);
	}

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->mmu_fault->engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.type &&
				    en->inst == engine->subdev.inst) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger a MMU fault for the engine.
	 *
	 * It's not clear why this is needed, but nvgpu does something
	 * similar, and it makes recovery from CTXSW_TIMEOUT a lot more
	 * reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

static const struct nvkm_enum
gk104_fifo_mmu_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0" },
	{ 0x08, "HOST1" },
	{ 0x09, "HOST2" },
	{ 0x0a, "HOST3" },
	{ 0x0b, "HOST4" },
	{ 0x0c, "HOST5" },
	{ 0x0d, "HOST6" },
	{ 0x0e, "HOST7" },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
	{}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_mmu_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
"PROP_2" }, 685 { 0x12, "PROP_3" }, 686 { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" }, 687 { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" }, 688 { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" }, 689 { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" }, 690 { 0x1f, "GPM" }, 691 { 0x20, "LTP_UTLB_0" }, 692 { 0x21, "LTP_UTLB_1" }, 693 { 0x22, "LTP_UTLB_2" }, 694 { 0x23, "LTP_UTLB_3" }, 695 { 0x24, "GPC_RGG_UTLB" }, 696 {} 697 }; 698 699 const struct nvkm_fifo_func_mmu_fault 700 gk104_fifo_mmu_fault = { 701 .recover = gf100_fifo_mmu_fault_recover, 702 .access = gf100_fifo_mmu_fault_access, 703 .engine = gk104_fifo_mmu_fault_engine, 704 .reason = gk104_fifo_mmu_fault_reason, 705 .hubclient = gk104_fifo_mmu_fault_hubclient, 706 .gpcclient = gk104_fifo_mmu_fault_gpcclient, 707 }; 708 709 static const struct nvkm_enum 710 gk104_fifo_intr_bind_reason[] = { 711 { 0x01, "BIND_NOT_UNBOUND" }, 712 { 0x02, "SNOOP_WITHOUT_BAR1" }, 713 { 0x03, "UNBIND_WHILE_RUNNING" }, 714 { 0x05, "INVALID_RUNLIST" }, 715 { 0x06, "INVALID_CTX_TGT" }, 716 { 0x0b, "UNBIND_WHILE_PARKED" }, 717 {} 718 }; 719 720 void 721 gk104_fifo_intr_bind(struct nvkm_fifo *fifo) 722 { 723 struct nvkm_subdev *subdev = &fifo->engine.subdev; 724 u32 intr = nvkm_rd32(subdev->device, 0x00252c); 725 u32 code = intr & 0x000000ff; 726 const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code); 727 728 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); 729 } 730 731 static const struct nvkm_enum 732 gk104_fifo_sched_reason[] = { 733 { 0x0a, "CTXSW_TIMEOUT" }, 734 {} 735 }; 736 737 static void 738 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) 739 { 740 struct nvkm_device *device = fifo->base.engine.subdev.device; 741 unsigned long flags, engm = 0; 742 u32 engn; 743 744 /* We need to ACK the SCHED_ERROR here, and prevent it reasserting, 745 * as MMU_FAULT cannot be triggered while it's pending. 746 */ 747 spin_lock_irqsave(&fifo->base.lock, flags); 748 nvkm_mask(device, 0x002140, 0x00000100, 0x00000000); 749 nvkm_wr32(device, 0x002100, 0x00000100); 750 751 for (engn = 0; engn < fifo->engine_nr; engn++) { 752 struct gk104_fifo_engine_status status; 753 754 gk104_fifo_engine_status(fifo, engn, &status); 755 if (!status.busy || !status.chsw) 756 continue; 757 758 engm |= BIT(engn); 759 } 760 761 for_each_set_bit(engn, &engm, fifo->engine_nr) 762 gk104_fifo_recover_engn(fifo, engn); 763 764 nvkm_mask(device, 0x002140, 0x00000100, 0x00000100); 765 spin_unlock_irqrestore(&fifo->base.lock, flags); 766 } 767 768 static void 769 gk104_fifo_intr_sched(struct gk104_fifo *fifo) 770 { 771 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 772 struct nvkm_device *device = subdev->device; 773 u32 intr = nvkm_rd32(device, 0x00254c); 774 u32 code = intr & 0x000000ff; 775 const struct nvkm_enum *en = 776 nvkm_enum_find(gk104_fifo_sched_reason, code); 777 778 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? 
en->name : ""); 779 780 switch (code) { 781 case 0x0a: 782 gk104_fifo_intr_sched_ctxsw(fifo); 783 break; 784 default: 785 break; 786 } 787 } 788 789 void 790 gk104_fifo_intr_chsw(struct nvkm_fifo *fifo) 791 { 792 struct nvkm_subdev *subdev = &fifo->engine.subdev; 793 struct nvkm_device *device = subdev->device; 794 u32 stat = nvkm_rd32(device, 0x00256c); 795 796 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); 797 nvkm_wr32(device, 0x00256c, stat); 798 } 799 800 static void 801 gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo) 802 { 803 struct nvkm_subdev *subdev = &fifo->engine.subdev; 804 u32 stat = nvkm_rd32(subdev->device, 0x00259c); 805 806 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); 807 } 808 809 void 810 gk104_fifo_intr_runlist(struct nvkm_fifo *fifo) 811 { 812 struct nvkm_device *device = fifo->engine.subdev.device; 813 struct nvkm_runl *runl; 814 u32 mask = nvkm_rd32(device, 0x002a00); 815 816 nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) { 817 nvkm_wr32(device, 0x002a00, BIT(runl->id)); 818 } 819 } 820 821 irqreturn_t 822 gk104_fifo_intr(struct nvkm_inth *inth) 823 { 824 struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth); 825 struct nvkm_subdev *subdev = &fifo->engine.subdev; 826 struct nvkm_device *device = subdev->device; 827 u32 mask = nvkm_rd32(device, 0x002140); 828 u32 stat = nvkm_rd32(device, 0x002100) & mask; 829 830 if (stat & 0x00000001) { 831 gk104_fifo_intr_bind(fifo); 832 nvkm_wr32(device, 0x002100, 0x00000001); 833 stat &= ~0x00000001; 834 } 835 836 if (stat & 0x00000010) { 837 nvkm_error(subdev, "PIO_ERROR\n"); 838 nvkm_wr32(device, 0x002100, 0x00000010); 839 stat &= ~0x00000010; 840 } 841 842 if (stat & 0x00000100) { 843 gk104_fifo_intr_sched(gk104_fifo(fifo)); 844 nvkm_wr32(device, 0x002100, 0x00000100); 845 stat &= ~0x00000100; 846 } 847 848 if (stat & 0x00010000) { 849 gk104_fifo_intr_chsw(fifo); 850 nvkm_wr32(device, 0x002100, 0x00010000); 851 stat &= ~0x00010000; 852 } 853 854 if (stat & 0x00800000) { 855 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); 856 nvkm_wr32(device, 0x002100, 0x00800000); 857 stat &= ~0x00800000; 858 } 859 860 if (stat & 0x01000000) { 861 nvkm_error(subdev, "LB_ERROR\n"); 862 nvkm_wr32(device, 0x002100, 0x01000000); 863 stat &= ~0x01000000; 864 } 865 866 if (stat & 0x08000000) { 867 gk104_fifo_intr_dropped_fault(fifo); 868 nvkm_wr32(device, 0x002100, 0x08000000); 869 stat &= ~0x08000000; 870 } 871 872 if (stat & 0x10000000) { 873 gf100_fifo_intr_mmu_fault(fifo); 874 stat &= ~0x10000000; 875 } 876 877 if (stat & 0x20000000) { 878 if (gf100_fifo_intr_pbdma(fifo)) 879 stat &= ~0x20000000; 880 } 881 882 if (stat & 0x40000000) { 883 gk104_fifo_intr_runlist(fifo); 884 stat &= ~0x40000000; 885 } 886 887 if (stat & 0x80000000) { 888 nvkm_wr32(device, 0x002100, 0x80000000); 889 nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT); 890 stat &= ~0x80000000; 891 } 892 893 if (stat) { 894 nvkm_error(subdev, "INTR %08x\n", stat); 895 spin_lock(&fifo->lock); 896 nvkm_mask(device, 0x002140, stat, 0x00000000); 897 spin_unlock(&fifo->lock); 898 nvkm_wr32(device, 0x002100, stat); 899 } 900 901 return IRQ_HANDLED; 902 } 903 904 void 905 gk104_fifo_fini(struct nvkm_fifo *base) 906 { 907 struct gk104_fifo *fifo = gk104_fifo(base); 908 flush_work(&fifo->recover.work); 909 } 910 911 void 912 gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask) 913 { 914 struct nvkm_device *device = fifo->engine.subdev.device; 915 916 nvkm_wr32(device, 0x000204, mask); 917 nvkm_mask(device, 0x002a04, 0xbfffffff, 
void
gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
	struct nvkm_device *device = fifo->engine.subdev.device;

	nvkm_wr32(device, 0x000204, mask);
	nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}

void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

int
gk104_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_top_device *tdev;
	struct nvkm_runl *runl;
	struct nvkm_runq *runq;
	const struct nvkm_engn_func *func;

	nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
		runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
		if (!runl) {
			runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
			if (IS_ERR(runl))
				return PTR_ERR(runl);

			nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
				if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
					return -ENOMEM;

				runl->runq[runl->runq_nr++] = runq;
			}
		}

		if (tdev->engine < 0)
			continue;

		switch (tdev->type) {
		case NVKM_ENGINE_CE:
			func = fifo->func->engn_ce;
			break;
		case NVKM_ENGINE_GR:
			nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
			fallthrough;
		default:
			func = fifo->func->engn;
			break;
		}

		nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
	}

	return 0;
}

int
gk104_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 4096;
}

int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	struct nvkm_top_device *tdev;
	int ret, i, j;

	/* Determine runlist configuration from topology device info. */
	list_for_each_entry(tdev, &device->top->device, head) {
		const int engn = tdev->engine;

		if (engn < 0)
			continue;

		fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
		fifo->engine[engn].runl = tdev->runlist;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[tdev->runlist].engm |= BIT(engn);
		fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
		if (tdev->type == NVKM_ENGINE_GR)
			fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
		fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
	}

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, type, inst, &fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.chid_nr = gk104_fifo_chid_nr,
	.chid_ctor = gf100_fifo_chid_ctor,
	.runq_nr = gf100_fifo_runq_nr,
	.runl_ctor = gk104_fifo_runl_ctor,
	.init = gk104_fifo_init,
	.init_pbdmas = gk104_fifo_init_pbdmas,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
	.mmu_fault = &gk104_fifo_mmu_fault,
	.engine_id = gk104_fifo_engine_id,
	.recover_chan = gk104_fifo_recover_chan,
	.runlist = &gk104_fifo_runlist,
	.nonstall = &gf100_fifo_nonstall,
	.runl = &gk104_runl,
	.runq = &gk104_runq,
	.engn = &gk104_engn,
	.engn_ce = &gk104_engn_ce,
	.cgrp = {{ }, &nv04_cgrp },
	.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan, .ctor = &gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, type, inst, 0, pfifo);
}